diff --git a/.gitignore b/.gitignore
index d2f69ad4e8759839082a38d4558a5ab7bac0e18d..798b187d4b326fb24bd4ccccabf7dc84b9bb20fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 *~
 *.hdf5
+*.dirstamp
 
 Makefile
 Makefile.in
@@ -23,33 +24,25 @@ doc/Doxyfile
 
 examples/swift
 examples/swift_mpi
-examples/*/*.xmf
-examples/*/*.h5
-examples/*/*.png
-examples/*/*.mp4
-examples/*/*.txt
-examples/*/*.dot
-examples/*/restart/*
-examples/*/used_parameters.yml
-examples/*/unused_parameters.yml
 examples/*/*/*.xmf
 examples/*/*/*.png
 examples/*/*/*.mp4
 examples/*/*/*.txt
-examples/*/*/*.dot
 examples/*/*/*.rst
 examples/*/*/*.hdf5
-examples/*/snapshots*
-examples/*/restart/*
+examples/*/*/*.csv
+examples/*/*/*.dot
+examples/*/*/restart/*
 examples/*/*/used_parameters.yml
-examples/*/err_file*
-examples/*/out_file*
-examples/*/stf_output*
-examples/*/stf_ouput*
-examples/*/log*
+examples/*/*/log*
 examples/*/*/unused_parameters.yml
 examples/*/*.mpg
-examples/*/gravity_checks_*.dat
+examples/*/*/gravity_checks_*.dat
+examples/*/*/coolingtables.tar.gz
+examples/*/*/coolingtables
+examples/Cooling/CoolingRates/cooling_rates
+examples/Cooling/CoolingRates/cooling_element_*.dat
+examples/Cooling/CoolingRates/cooling_output.dat
 
 tests/testActivePair
 tests/testActivePair.sh
@@ -63,12 +56,18 @@ tests/brute_force_perturbed.dat
 tests/swift_dopair_perturbed.dat
 tests/test27cells
 tests/test27cells_subset
+tests/test27cellsStars
+tests/test27cellsStars_subset
 tests/testPeriodicBC
 tests/test125cells
 tests/brute_force_27_standard.dat
 tests/swift_dopair_27_standard.dat
 tests/brute_force_27_perturbed.dat
 tests/swift_dopair_27_perturbed.dat
+tests/star_brute_force_27_standard.dat
+tests/swift_star_dopair_27_standard.dat
+tests/star_brute_force_27_perturbed.dat
+tests/swift_star_dopair_27_perturbed.dat
 tests/brute_force_125_standard.dat
 tests/swift_dopair_125_standard.dat
 tests/brute_force_125_perturbed.dat
@@ -99,6 +98,7 @@ tests/testInteractions
 tests/testInteractions.sh
 tests/testSymmetry
 tests/testMaths
+tests/testRandom
 tests/testThreadpool
 tests/testParser
 tests/parser_output.yml
@@ -106,6 +106,8 @@ tests/testPeriodicBC.sh
 tests/testPeriodicBCPerturbed.sh
 tests/test27cells.sh
 tests/test27cellsPerturbed.sh
+tests/test27cellsStars.sh
+tests/test27cellsStarsPerturbed.sh
 tests/test125cells.sh
 tests/test125cellsPerturbed.sh
 tests/testParser.sh
@@ -131,7 +133,13 @@ tests/testEOS
 tests/testEOS*.txt
 tests/testEOS*.png
 tests/testUtilities
+tests/testCosmology
+tests/testOutputList
 tests/testCbrt
+tests/testFormat.sh
+tests/testCooling
+tests/*.png
+tests/*.txt
 
 theory/latex/swift.pdf
 theory/SPH/Kernels/kernels.pdf
@@ -148,6 +156,7 @@ theory/Multipoles/potential_long.pdf
 theory/Multipoles/potential_short.pdf
 theory/Multipoles/force_short.pdf
 theory/Cosmology/cosmology.pdf
+theory/Cooling/eagle_cooling.pdf
 
 m4/libtool.m4
 m4/ltoptions.m4
diff --git a/AUTHORS b/AUTHORS
index 7cbdaffcf7813aae0488a4f284c789cf6f3e30d9..3bbcc3c251d52bfcf372e807e9e5c3d02ea30ca5 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -12,3 +12,4 @@ Stefan Arridge		stefan.arridge@durham.ac.uk
 Josh Borrow             joshua.borrow@durham.ac.uk
 Loic Hausammann		loic.hausammann@epfl.ch
 Yves Revaz   		yves.revaz@epfl.ch
+Jacob Kegerreis         jacob.kegerreis@durham.ac.uk
diff --git a/INSTALL.swift b/INSTALL.swift
index db6c6677b202e55e76114373f3e037cf50de10cc..4fa82c60838cf417961682318095f090f1bb709f 100644
--- a/INSTALL.swift
+++ b/INSTALL.swift
@@ -138,14 +138,15 @@ before you can build it.
                            =====================
 
 
- - METIS:
-	a build of the METIS library can be optionally used to
-        optimize the load between MPI nodes (requires an MPI
-        library). This should be found in the standard installation
-        directories, or pointed at using the "--with-metis"
-        configuration option.  In this case the top-level installation
-        directory of the METIS build should be given. Note to use
-        METIS you should supply at least "--with-metis".
+ - METIS/ParMETIS:
+	a build of the METIS or ParMETIS library should be used to
+        optimize the load between MPI nodes. This should be found in the
+        standard installation directories, or pointed at using the
+        "--with-metis" or "--with-parmetis" configuration options.
+        In this case the top-level installation directory of the build
+        should be given. Note that to use METIS or ParMETIS you should supply at
+        least "--with-metis" or "--with-parmetis". ParMETIS is preferred when there
+        is a choice.
 
 - libNUMA:
 	a build of the NUMA library can be used to pin the threads to
@@ -154,6 +155,12 @@ before you can build it.
         distributing the threads among the different cores on each
         computing node.
 
+        Note that if you have libNUMA outside of the system include
+        directories it may fail to compile as the headers do not pass
+        the -Wstrict-prototypes check of GCC. In that case you will need
+        to use the --enable-compiler-warnings=yes configure option to stop
+        this being an error.
+
  - tcmalloc / jemalloc / TBBmalloc:
 	a build of the tcmalloc library (part of gperftools), jemalloc
 	or TBBmalloc can be used be used to obtain faster and more
diff --git a/Makefile.am b/Makefile.am
index fb4eb5f6d6b63a7d0e034e0a3202ac61066e6e25..c71cc8d00c797f0e2afc034cb1abfff7eba14c88 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -19,7 +19,10 @@
 ACLOCAL_AMFLAGS = -I m4
 
 # Show the way...
-SUBDIRS = src examples doc tests
+SUBDIRS = src argparse examples doc tests tools
+if HAVEEAGLECOOLING
+SUBDIRS += examples/Cooling/CoolingRates
+endif
 
 # Non-standard files that should be part of the distribution.
 EXTRA_DIST = INSTALL.swift .clang-format format.sh
diff --git a/README b/README
index b9209684d65c826ce94812871495743dc8e5cfba..7060589401d80e205733fb5770f258708263d966 100644
--- a/README
+++ b/README
@@ -1,9 +1,9 @@
  Welcome to the cosmological hydrodynamical code
     ______       _________________
    / ___/ |     / /  _/ ___/_  __/
-   \__ \| | /| / // // /_   / /   
-  ___/ /| |/ |/ // // __/  / /    
- /____/ |__/|__/___/_/    /_/     
+   \__ \| | /| / // // /_   / /
+  ___/ /| |/ |/ // // __/  / /
+ /____/ |__/|__/___/_/    /_/
  SPH With Inter-dependent Fine-grained Tasking
 
  Website: www.swiftsim.com
@@ -11,38 +11,63 @@
 
 See INSTALL.swift for install instructions.
 
-Usage: swift [OPTION]... PARAMFILE
-       swift_mpi [OPTION]... PARAMFILE
+Usage: swift [options] [[--] param-file]
+   or: swift [options] param-file
+   or: swift_mpi [options] [[--] param-file]
+   or: swift_mpi [options] param-file
 
-Valid options are:
-  -a                Pin runners using processor affinity.
-  -c                Run with cosmological time integration.
-  -C                Run with cooling.
-  -d                Dry run. Read the parameter file, allocate memory but does not read
-                    the particles from ICs and exit before the start of time integration.
-                    Allows user to check validity of parameter and IC files as well as memory limits.
-  -D                Always drift all particles even the ones far from active particles. This emulates
-                    Gadget-[23] and GIZMO's default behaviours.
-  -e                Enable floating-point exceptions (debugging mode).
-  -f          {int} Overwrite the CPU frequency (Hz) to be used for time measurements.
-  -g                Run with an external gravitational potential.
-  -G                Run with self-gravity.
-  -M                Reconstruct the multipoles every time-step.
-  -n          {int} Execute a fixed number of time steps. When unset use the time_end parameter to stop.
-  -o          {str} Generate a default output parameter file.
-  -P  {sec:par:val} Set parameter value and overwrites values read from the parameters file. Can be used more than once.
-  -r                Continue using restart files.
-  -s                Run with hydrodynamics.
-  -S                Run with stars.
-  -t          {int} The number of threads to use on each MPI rank. Defaults to 1 if not specified.
-  -T                Print timers every time-step.
-  -v           [12] Increase the level of verbosity:
-                    1: MPI-rank 0 writes,
-                    2: All MPI-ranks write.
-  -x                Run with structure finding.
-  -y          {int} Time-step frequency at which task graphs are dumped.
-  -Y          {int} Time-step frequency at which threadpool tasks are dumped.
-  -h                Print this help message and exit.
+Parameters:
 
-See the file parameter_example.yml for an example of parameter file.
+    -h, --help                        show this help message and exit
 
+  Simulation options:
+  
+    -b, --feedback                    Run with stars feedback.
+    -c, --cosmology                   Run with cosmological time integration.
+    --temperature                     Run with temperature calculation.
+    -C, --cooling                     Run with cooling (also switches on --temperature).
+    -D, --drift-all                   Always drift all particles even the ones
+                                      far from active particles. This emulates
+                                      Gadget-[23] and GIZMO's default behaviours.
+    -F, --star-formation              Run with star formation.
+    -g, --external-gravity            Run with an external gravitational potential.
+    -G, --self-gravity                Run with self-gravity.
+    -M, --multipole-reconstruction    Reconstruct the multipoles every time-step.
+    -s, --hydro                       Run with hydrodynamics.
+    -S, --stars                       Run with stars.
+    -x, --velociraptor                Run with structure finding.
+    --limiter                         Run with time-step limiter.
+    
+  Control options:
+  
+    -a, --pin                         Pin runners using processor affinity.
+    -d, --dry-run                     Dry run. Read the parameter file, allocates
+                                      memory but does not read the particles
+                                      from ICs. Exits before the start of time
+                                      integration. Checks the validity of
+                                      parameters and IC files as well as memory
+                                      limits.
+    -e, --fpe                         Enable floating-point exceptions (debugging
+                                      mode).
+    -f, --cpu-frequency=<str>         Overwrite the CPU frequency (Hz) to be
+                                      used for time measurements.
+    -n, --steps=<int>                 Execute a fixed number of time steps.
+                                      When unset use the time_end parameter
+                                      to stop.
+    -o, --output-params=<str>         Generate a default output parameter
+                                      file.
+    -P, --param=<str>                 Set parameter value, overriding the value
+                                      read from the parameter file. Can be used
+                                      more than once {sec:par:value}.
+    -r, --restart                     Continue using restart files.
+    -t, --threads=<int>               The number of threads to use on each MPI
+                                      rank. Defaults to 1 if not specified.
+    -T, --timers=<int>                Print timers every time-step.
+    -v, --verbose=<int>               Run in verbose mode, in MPI mode 2 outputs
+                                      from all ranks.
+    -y, --task-dumps=<int>            Time-step frequency at which task analysis
+                                      files and/or tasks are dumped.
+    -Y, --threadpool-dumps=<int>      Time-step frequency at which threadpool
+                                      tasks are dumped.
+
+See the file examples/parameter_example.yml for an example of parameter file.
diff --git a/README.md b/README.md
index 25f8e14b5b881149270a7e7b8a14ffe9535149ef..c160a21adb921da79ae660196d5fa33e20af74fc 100644
--- a/README.md
+++ b/README.md
@@ -49,9 +49,9 @@ are highly encouraged.
  Welcome to the cosmological hydrodynamical code
     ______       _________________
    / ___/ |     / /  _/ ___/_  __/
-   \__ \| | /| / // // /_   / /   
-  ___/ /| |/ |/ // // __/  / /    
- /____/ |__/|__/___/_/    /_/     
+   \__ \| | /| / // // /_   / /
+  ___/ /| |/ |/ // // __/  / /
+ /____/ |__/|__/___/_/    /_/
  SPH With Inter-dependent Fine-grained Tasking
 
  Website: www.swiftsim.com
@@ -59,38 +59,63 @@ are highly encouraged.
 
 See INSTALL.swift for install instructions.
 
-Usage: swift [OPTION]... PARAMFILE
-       swift_mpi [OPTION]... PARAMFILE
-
-Valid options are:
-  -a                Pin runners using processor affinity.
-  -c                Run with cosmological time integration.
-  -C                Run with cooling.
-  -d                Dry run. Read the parameter file, allocate memory but does not read
-                    the particles from ICs and exit before the start of time integration.
-                    Allows user to check validity of parameter and IC files as well as memory limits.
-  -D                Always drift all particles even the ones far from active particles. This emulates
-                    Gadget-[23] and GIZMO's default behaviours.
-  -e                Enable floating-point exceptions (debugging mode).
-  -f          {int} Overwrite the CPU frequency (Hz) to be used for time measurements.
-  -g                Run with an external gravitational potential.
-  -G                Run with self-gravity.
-  -M                Reconstruct the multipoles every time-step.
-  -n          {int} Execute a fixed number of time steps. When unset use the time_end parameter to stop.
-  -o          {str} Generate a default output parameter file.
-  -P  {sec:par:val} Set parameter value and overwrites values read from the parameters file. Can be used more than once.
-  -r                Continue using restart files.
-  -s                Run with hydrodynamics.
-  -S                Run with stars.
-  -t          {int} The number of threads to use on each MPI rank. Defaults to 1 if not specified.
-  -T                Print timers every time-step.
-  -v           [12] Increase the level of verbosity:
-                    1: MPI-rank 0 writes,
-                    2: All MPI-ranks write.
-  -x                Run with structure finding.
-  -y          {int} Time-step frequency at which task graphs are dumped.
-  -Y          {int} Time-step frequency at which threadpool tasks are dumped.
-  -h                Print this help message and exit.
+Usage: swift [options] [[--] param-file]
+   or: swift [options] param-file
+   or: swift_mpi [options] [[--] param-file]
+   or: swift_mpi [options] param-file
+
+Parameters:
+
+    -h, --help                        show this help message and exit
+
+  Simulation options:
+  
+    -b, --feedback                    Run with stars feedback.
+    -c, --cosmology                   Run with cosmological time integration.
+    --temperature                     Run with temperature calculation.
+    -C, --cooling                     Run with cooling (also switches on --temperature).
+    -D, --drift-all                   Always drift all particles even the ones
+                                      far from active particles. This emulates
+                                      Gadget-[23] and GIZMO's default behaviours.
+    -F, --star-formation              Run with star formation.
+    -g, --external-gravity            Run with an external gravitational potential.
+    -G, --self-gravity                Run with self-gravity.
+    -M, --multipole-reconstruction    Reconstruct the multipoles every time-step.
+    -s, --hydro                       Run with hydrodynamics.
+    -S, --stars                       Run with stars.
+    -x, --velociraptor                Run with structure finding.
+    --limiter                         Run with time-step limiter.
+
+  Control options:
+  
+    -a, --pin                         Pin runners using processor affinity.
+    -d, --dry-run                     Dry run. Read the parameter file, allocates
+                                      memory but does not read the particles
+                                      from ICs. Exits before the start of time
+                                      integration. Checks the validity of
+                                      parameters and IC files as well as memory
+                                      limits.
+    -e, --fpe                         Enable floating-point exceptions (debugging
+                                      mode).
+    -f, --cpu-frequency=<str>         Overwrite the CPU frequency (Hz) to be
+                                      used for time measurements.
+    -n, --steps=<int>                 Execute a fixed number of time steps.
+                                      When unset use the time_end parameter
+                                      to stop.
+    -o, --output-params=<str>         Generate a default output parameter
+                                      file.
+    -P, --param=<str>                 Set parameter value, overriding the value
+                                      read from the parameter file. Can be used
+                                      more than once {sec:par:value}.
+    -r, --restart                     Continue using restart files.
+    -t, --threads=<int>               The number of threads to use on each MPI
+                                      rank. Defaults to 1 if not specified.
+    -T, --timers=<int>                Print timers every time-step.
+    -v, --verbose=<int>               Run in verbose mode, in MPI mode 2 outputs
+                                      from all ranks.
+    -y, --task-dumps=<int>            Time-step frequency at which task analysis
+                                      files and/or tasks are dumped.
+    -Y, --threadpool-dumps=<int>      Time-step frequency at which threadpool
+                                      tasks are dumped.
 
 See the file examples/parameter_example.yml for an example of parameter file.
-```
diff --git a/argparse/FAQs.md b/argparse/FAQs.md
new file mode 100644
index 0000000000000000000000000000000000000000..c760807070b192c33e624e3f98af4cd24fe16fca
--- /dev/null
+++ b/argparse/FAQs.md
@@ -0,0 +1,36 @@
+# FAQs
+
+## Why removing parsed command-line switches/options?
+
+It destroys the original `argv` array, not compatible with other arguments parsing
+library.
+
+This is because this library is used for short-lived programs, e.g. cli tools
+at the beginning. It's very convenient to process the remaining arguments if we
+remove parsed command-line arguments, e.g. `<command> [-[s]|--switch]... arguments`.
+
+If you want to keep the original `argc/argv`, you can make a copy, then pass them to
+`argparse_parse`, e.g.
+
+```c
+int copy_argc          = argc;
+const char **copy_argv = argv;
+copy_argv  = malloc(copy_argc * sizeof(char *));
+for (int i = 0; i < argc; i++) {
+	copy_argv[i] = (char *)argv[i];
+}   
+argparse_parse(&argparse, copy_argc, copy_argv);
+```
+
+Issues:
+
+- https://github.com/cofyc/argparse/issues/3
+- https://github.com/cofyc/argparse/issues/9
+
+## Why using `intptr_t` to hold associated data? Why not `void *`?
+
+I chose `intptr_t` because it's an integer type which can also be used to hold
+a pointer value. Most of the time, we only need an integer to hold a
+user-provided value; see `OPT_BIT` as an example. If you want to provide a pointer
+which points to a large amount of data, you can cast it to `intptr_t` and cast
+it back to original pointer in callback function.
diff --git a/argparse/LICENSE b/argparse/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..3c777497645ca9998899db5d8a8041e9831a4604
--- /dev/null
+++ b/argparse/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2012-2013 Yecheng Fu <cofyc.jackson@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/argparse/Makefile.am b/argparse/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..2fa6fb9e9ef4c014697a2c434cd86741cc74d79c
--- /dev/null
+++ b/argparse/Makefile.am
@@ -0,0 +1,28 @@
+# This file is part of SWIFT.
+# Copyright (c) 2018 Peter W. Draper (p.w.draper@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+lib_LTLIBRARIES = libargparse.la
+
+include_HEADERS = argparse.h
+
+AM_SOURCES = argparse.c
+
+# Sources and flags for regular library
+libargparse_la_SOURCES = $(AM_SOURCES)
+libargparse_la_CFLAGS = $(AM_CFLAGS)
+libargparse_la_LDFLAGS = $(AM_LDFLAGS)
+
+EXTRA_DIST = LICENSE README.md
diff --git a/argparse/OWNERS b/argparse/OWNERS
new file mode 100644
index 0000000000000000000000000000000000000000..8cad69dd488010bcaa66ed80d5e3d425f647064c
--- /dev/null
+++ b/argparse/OWNERS
@@ -0,0 +1,2 @@
+approvers:
+- cofyc
diff --git a/argparse/README.md b/argparse/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ddf66b4777e9baaf68be270df3e8994cb96a2998
--- /dev/null
+++ b/argparse/README.md
@@ -0,0 +1,103 @@
+# argparse [![Build Status](https://travis-ci.org/cofyc/argparse.png)](https://travis-ci.org/cofyc/argparse)
+
+argparse - A command line arguments parsing library in C (compatible with C++).
+
+## Description
+
+This module is inspired by parse-options.c (git) and python's argparse
+module.
+
+Argument parsing is a common task in CLI programs, but traditional `getopt`
+libraries are not easy to use. This library provides high-level argument
+parsing solutions.
+
+The program defines what arguments it requires, and `argparse` will figure
+out how to parse those out of `argc` and `argv`, it also automatically
+generates help and usage messages and issues errors when users give the
+program invalid arguments.
+
+## Features
+
+ - handles both optional and positional arguments
+ - produces highly informative usage messages
+ - issues errors when given invalid arguments
+
+There are basically three types of options:
+
+ - boolean options
+ - options with mandatory argument
+ - options with optional argument
+
+There are basically two forms of options:
+
+ - a short option consists of one dash (`-`) and one alphanumeric character.
+ - a long option begins with two dashes (`--`) and some alphanumeric characters.
+
+Short options may be bundled, e.g. `-a -b` can be specified as `-ab`.
+
+Options are case-sensitive.
+
+Options and non-option arguments can clearly be separated using the `--` option.
+
+## Examples
+
+```c
+#include "argparse.h"
+
+static const char *const usage[] = {
+    "test_argparse [options] [[--] args]",
+    "test_argparse [options]",
+    NULL,
+};
+
+#define PERM_READ  (1<<0)
+#define PERM_WRITE (1<<1)
+#define PERM_EXEC  (1<<2)
+
+int
+main(int argc, const char **argv)
+{
+    int force = 0;
+    int test = 0;
+    int num = 0;
+    const char *path = NULL;
+    int perms = 0;
+    struct argparse_option options[] = {
+        OPT_HELP(),
+        OPT_GROUP("Basic options"),
+        OPT_BOOLEAN('f', "force", &force, "force to do"),
+        OPT_BOOLEAN('t', "test", &test, "test only"),
+        OPT_STRING('p', "path", &path, "path to read"),
+        OPT_INTEGER('n', "num", &num, "selected num"),
+        OPT_GROUP("Bits options"),
+        OPT_BIT(0, "read", &perms, "read perm", NULL, PERM_READ, OPT_NONEG),
+        OPT_BIT(0, "write", &perms, "write perm", NULL, PERM_WRITE),
+        OPT_BIT(0, "exec", &perms, "exec perm", NULL, PERM_EXEC),
+        OPT_END(),
+    };
+
+    struct argparse argparse;
+    argparse_init(&argparse, options, usage, 0);
+    argparse_describe(&argparse, "\nA brief description of what the program does and how it works.", "\nAdditional description of the program after the description of the arguments.");
+    argc = argparse_parse(&argparse, argc, argv);
+    if (force != 0)
+        printf("force: %d\n", force);
+    if (test != 0)
+        printf("test: %d\n", test);
+    if (path != NULL)
+        printf("path: %s\n", path);
+    if (num != 0)
+        printf("num: %d\n", num);
+    if (argc != 0) {
+        printf("argc: %d\n", argc);
+        int i;
+        for (i = 0; i < argc; i++) {
+            printf("argv[%d]: %s\n", i, *(argv + i));
+        }
+    }
+    if (perms) {
+        printf("perms: %d\n", perms);
+    }
+    return 0;
+}
+```
diff --git a/argparse/argparse.c b/argparse/argparse.c
new file mode 100644
index 0000000000000000000000000000000000000000..60d03518e3e0a82e035e0fe81abdc7ca27e13ef7
--- /dev/null
+++ b/argparse/argparse.c
@@ -0,0 +1,382 @@
+/**
+ * Copyright (C) 2012-2015 Yecheng Fu <cofyc.jackson at gmail dot com>
+ * All rights reserved.
+ *
+ * Use of this source code is governed by a MIT-style license that can be found
+ * in the LICENSE file.
+ */
+#include "../config.h"
+
+#include "argparse.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define OPT_UNSET 1
+#define OPT_LONG (1 << 1)
+
+static const char *prefix_skip(const char *str, const char *prefix) {
+  size_t len = strlen(prefix);
+  return strncmp(str, prefix, len) ? NULL : str + len;
+}
+
+static int prefix_cmp(const char *str, const char *prefix) {
+  for (;; str++, prefix++)
+    if (!*prefix) {
+      return 0;
+    } else if (*str != *prefix) {
+      return (unsigned char)*prefix - (unsigned char)*str;
+    }
+}
+
+static void argparse_error(struct argparse *self,
+                           const struct argparse_option *opt,
+                           const char *reason, int flags) {
+  (void)self;
+  if (flags & OPT_LONG) {
+    fprintf(stderr, "error: option `--%s` %s\n", opt->long_name, reason);
+  } else {
+    fprintf(stderr, "error: option `-%c` %s\n", opt->short_name, reason);
+  }
+  exit(1);
+}
+
+static int argparse_getvalue(struct argparse *self,
+                             const struct argparse_option *opt, int flags) {
+  const char *s = NULL;
+  if (!opt->value) goto skipped;
+  switch (opt->type) {
+    case ARGPARSE_OPT_BOOLEAN:
+      if (flags & OPT_UNSET) {
+        *(int *)opt->value = *(int *)opt->value - 1;
+      } else {
+        *(int *)opt->value = *(int *)opt->value + 1;
+      }
+      if (*(int *)opt->value < 0) {
+        *(int *)opt->value = 0;
+      }
+      break;
+    case ARGPARSE_OPT_BIT:
+      if (flags & OPT_UNSET) {
+        *(int *)opt->value &= ~opt->data;
+      } else {
+        *(int *)opt->value |= opt->data;
+      }
+      break;
+    case ARGPARSE_OPT_STRING:
+      if (self->optvalue) {
+        *(const char **)opt->value = self->optvalue;
+        self->optvalue = NULL;
+      } else if (self->argc > 1) {
+        self->argc--;
+        *(const char **)opt->value = *++self->argv;
+      } else {
+        argparse_error(self, opt, "requires a value", flags);
+      }
+      break;
+    case ARGPARSE_OPT_INTEGER:
+      errno = 0;
+      if (self->optvalue) {
+        *(int *)opt->value = strtol(self->optvalue, (char **)&s, 0);
+        self->optvalue = NULL;
+      } else if (self->argc > 1) {
+        self->argc--;
+        *(int *)opt->value = strtol(*++self->argv, (char **)&s, 0);
+      } else {
+        argparse_error(self, opt, "requires a value", flags);
+      }
+      if (errno) argparse_error(self, opt, strerror(errno), flags);
+      if (s[0] != '\0')
+        argparse_error(self, opt, "expects an integer value", flags);
+      break;
+    case ARGPARSE_OPT_FLOAT:
+      errno = 0;
+      if (self->optvalue) {
+        *(float *)opt->value = strtof(self->optvalue, (char **)&s);
+        self->optvalue = NULL;
+      } else if (self->argc > 1) {
+        self->argc--;
+        *(float *)opt->value = strtof(*++self->argv, (char **)&s);
+      } else {
+        argparse_error(self, opt, "requires a value", flags);
+      }
+      if (errno) argparse_error(self, opt, strerror(errno), flags);
+      if (s[0] != '\0')
+        argparse_error(self, opt, "expects a numerical value", flags);
+      break;
+    default:
+      assert(0);
+  }
+
+skipped:
+  if (opt->callback) {
+    return opt->callback(self, opt);
+  }
+
+  return 0;
+}
+
+static void argparse_options_check(const struct argparse_option *options) {
+  for (; options->type != ARGPARSE_OPT_END; options++) {
+    switch (options->type) {
+      case ARGPARSE_OPT_END:
+      case ARGPARSE_OPT_BOOLEAN:
+      case ARGPARSE_OPT_BIT:
+      case ARGPARSE_OPT_INTEGER:
+      case ARGPARSE_OPT_FLOAT:
+      case ARGPARSE_OPT_STRING:
+      case ARGPARSE_OPT_GROUP:
+        continue;
+      default:
+        fprintf(stderr, "wrong option type: %d", options->type);
+        break;
+    }
+  }
+}
+
+static int argparse_short_opt(struct argparse *self,
+                              const struct argparse_option *options) {
+  for (; options->type != ARGPARSE_OPT_END; options++) {
+    if (options->short_name == *self->optvalue) {
+      self->optvalue = self->optvalue[1] ? self->optvalue + 1 : NULL;
+      return argparse_getvalue(self, options, 0);
+    }
+  }
+  return -2;
+}
+
+static int argparse_long_opt(struct argparse *self,
+                             const struct argparse_option *options) {
+  for (; options->type != ARGPARSE_OPT_END; options++) {
+    const char *rest;
+    int opt_flags = 0;
+    if (!options->long_name) continue;
+
+    rest = prefix_skip(self->argv[0] + 2, options->long_name);
+    if (!rest) {
+      // negation disabled?
+      if (options->flags & OPT_NONEG) {
+        continue;
+      }
+      // only OPT_BOOLEAN/OPT_BIT supports negation
+      if (options->type != ARGPARSE_OPT_BOOLEAN &&
+          options->type != ARGPARSE_OPT_BIT) {
+        continue;
+      }
+
+      if (prefix_cmp(self->argv[0] + 2, "no-")) {
+        continue;
+      }
+      rest = prefix_skip(self->argv[0] + 2 + 3, options->long_name);
+      if (!rest) continue;
+      opt_flags |= OPT_UNSET;
+    }
+    if (*rest) {
+      if (*rest != '=') continue;
+      self->optvalue = rest + 1;
+    }
+    return argparse_getvalue(self, options, opt_flags | OPT_LONG);
+  }
+  return -2;
+}
+
+/* Initialise a parser with an option table, usage strings and behaviour
+ * flags. Always returns 0. The description/epilog strings stay NULL
+ * until argparse_describe() is called. */
+int argparse_init(struct argparse *self, struct argparse_option *options,
+                  const char *const *usages, int flags) {
+  memset(self, 0, sizeof(*self));
+  self->options = options;
+  self->usages = usages;
+  self->flags = flags;
+  self->description = NULL;
+  self->epilog = NULL;
+  return 0;
+}
+
+/* Attach the free-form text printed before (description) and after
+ * (epilog) the option list in argparse_usage(). Either may be NULL. */
+void argparse_describe(struct argparse *self, const char *description,
+                       const char *epilog) {
+  self->description = description;
+  self->epilog = epilog;
+}
+
+/* Parse argc/argv (argv[0] is skipped as the program name). Recognised
+ * options are consumed; the remaining arguments are compacted to the
+ * front of the caller's argv and their count is returned. On an unknown
+ * option, prints usage and exits with status 1. */
+int argparse_parse(struct argparse *self, int argc, const char **argv) {
+  self->argc = argc - 1;
+  self->argv = argv + 1;
+  self->out = argv;
+
+  argparse_options_check(self->options);
+
+  for (; self->argc; self->argc--, self->argv++) {
+    const char *arg = self->argv[0];
+    if (arg[0] != '-' || !arg[1]) {
+      if (self->flags & ARGPARSE_STOP_AT_NON_OPTION) {
+        goto end;
+      }
+      // if it's not option or is a single char '-', copy verbatim
+      self->out[self->cpidx++] = self->argv[0];
+      continue;
+    }
+    // short option
+    if (arg[1] != '-') {
+      self->optvalue = arg + 1;
+      switch (argparse_short_opt(self, self->options)) {
+        case -1:
+          break;
+        case -2:
+          goto unknown;
+      }
+      /* Keep consuming the rest of a bundle such as "-abc". */
+      while (self->optvalue) {
+        switch (argparse_short_opt(self, self->options)) {
+          case -1:
+            break;
+          case -2:
+            goto unknown;
+        }
+      }
+      continue;
+    }
+    // if '--' presents
+    if (!arg[2]) {
+      /* "--" ends option parsing; the tail is copied verbatim below. */
+      self->argc--;
+      self->argv++;
+      break;
+    }
+    // long option
+    switch (argparse_long_opt(self, self->options)) {
+      case -1:
+        break;
+      case -2:
+        goto unknown;
+    }
+    continue;
+
+  unknown:
+    fprintf(stderr, "error: unknown option `%s`\n", self->argv[0]);
+    argparse_usage(self);
+    exit(1);
+  }
+
+end:
+  /* Append the unparsed tail after the copied non-option arguments and
+   * NULL-terminate the compacted argv. */
+  memmove(self->out + self->cpidx, self->argv, self->argc * sizeof(*self->out));
+  self->out[self->cpidx + self->argc] = NULL;
+
+  return self->cpidx + self->argc;
+}
+
+/* Print the help text: the usage lines, the description, an aligned
+ * listing of every option with word-wrapped help strings, then the
+ * epilog. (Cleanup: the "=<int>/=<flt>/=<str>" placeholder handling
+ * previously mixed a stand-alone `if` with an `else if` chain, and one
+ * bare printf() amid fprintf(stdout, ...) calls -- both unified; the
+ * option types are mutually exclusive, so behaviour is unchanged.) */
+void argparse_usage(struct argparse *self) {
+  if (self->usages) {
+    fprintf(stdout, "Usage: %s\n", *self->usages++);
+    while (*self->usages && **self->usages)
+      fprintf(stdout, "   or: %s\n", *self->usages++);
+  } else {
+    fprintf(stdout, "Usage:\n");
+  }
+
+  // print description
+  if (self->description) fprintf(stdout, "%s\n", self->description);
+
+  fputc('\n', stdout);
+
+  const struct argparse_option *options;
+
+  // figure out best width
+  size_t usage_opts_width = 0;
+  size_t len;
+  options = self->options;
+  for (; options->type != ARGPARSE_OPT_END; options++) {
+    len = 0;
+    if ((options)->short_name) {
+      len += 2;  // "-x"
+    }
+    if ((options)->short_name && (options)->long_name) {
+      len += 2;  // separator ", "
+    }
+    if ((options)->long_name) {
+      len += strlen((options)->long_name) + 2;  // "--name"
+    }
+    // account for the value placeholder, if any
+    if (options->type == ARGPARSE_OPT_INTEGER) {
+      len += strlen("=<int>");
+    } else if (options->type == ARGPARSE_OPT_FLOAT) {
+      len += strlen("=<flt>");
+    } else if (options->type == ARGPARSE_OPT_STRING) {
+      len += strlen("=<str>");
+    }
+    len = (len + 3) - ((len + 3) & 3);  // round (len + 3) down to a multiple of 4
+    if (usage_opts_width < len) {
+      usage_opts_width = len;
+    }
+  }
+  usage_opts_width += 4;  // 4 spaces prefix
+
+  options = self->options;
+  for (; options->type != ARGPARSE_OPT_END; options++) {
+    size_t pos = 0;
+    int pad = 0;
+    if (options->type == ARGPARSE_OPT_GROUP) {
+      fputc('\n', stdout);
+      fprintf(stdout, "%s", options->help);
+      fputc('\n', stdout);
+      continue;
+    }
+    pos = fprintf(stdout, "    ");
+    if (options->short_name) {
+      pos += fprintf(stdout, "-%c", options->short_name);
+    }
+    if (options->long_name && options->short_name) {
+      pos += fprintf(stdout, ", ");
+    }
+    if (options->long_name) {
+      pos += fprintf(stdout, "--%s", options->long_name);
+    }
+    if (options->type == ARGPARSE_OPT_INTEGER) {
+      pos += fprintf(stdout, "=<int>");
+    } else if (options->type == ARGPARSE_OPT_FLOAT) {
+      pos += fprintf(stdout, "=<flt>");
+    } else if (options->type == ARGPARSE_OPT_STRING) {
+      pos += fprintf(stdout, "=<str>");
+    }
+    if (pos <= usage_opts_width) {
+      pad = usage_opts_width - pos;
+    } else {
+      fputc('\n', stdout);
+      pad = usage_opts_width;
+    }
+    if (options->help != NULL && strlen(options->help) > 0) {
+      /* Word-wrap the help text at ~30 characters per line, keeping
+       * continuation lines aligned under the first one. */
+      char *str = strdup(options->help);
+      char *token = strtok(str, " ");
+      fprintf(stdout, "%*s%s ", pad + 2, "", token);
+      int count = strlen(token);
+      int dangling = 1;
+      while ((token = strtok(NULL, " ")) != NULL) {
+        if (count == 0) {
+          fprintf(stdout, "%*s", (int)pos + pad + 2, "");
+          dangling = 1;
+        }
+        fprintf(stdout, "%s ", token);
+        count += strlen(token);
+        if (count > 30) {
+          count = 0;
+          fprintf(stdout, "\n");
+          dangling = 0;
+        }
+      }
+      if (dangling) fprintf(stdout, "\n");
+      free(str);
+    } else {
+      fprintf(stdout, "\n");
+    }
+  }
+
+  // print epilog
+  if (self->epilog) fprintf(stdout, "%s\n", self->epilog);
+}
+
+/* Built-in callback for OPT_HELP(): print the usage text and exit 0.
+ * Never returns. */
+int argparse_help_cb(struct argparse *self,
+                     const struct argparse_option *option) {
+  (void)option;  /* unused */
+  argparse_usage(self);
+  exit(0);
+}
diff --git a/argparse/argparse.h b/argparse/argparse.h
new file mode 100644
index 0000000000000000000000000000000000000000..186214b4bc90cea90ef141380bf0017cc50af128
--- /dev/null
+++ b/argparse/argparse.h
@@ -0,0 +1,137 @@
+/**
+ * Copyright (C) 2012-2015 Yecheng Fu <cofyc.jackson at gmail dot com>
+ * All rights reserved.
+ *
+ * Use of this source code is governed by a MIT-style license that can be found
+ * in the LICENSE file.
+ */
+#ifndef ARGPARSE_H
+#define ARGPARSE_H
+
+/* For c++ compatibility */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+struct argparse;
+struct argparse_option;
+
+typedef int argparse_callback(struct argparse *self,
+                              const struct argparse_option *option);
+
+enum argparse_flag {
+  ARGPARSE_STOP_AT_NON_OPTION = 1,
+};
+
+enum argparse_option_type {
+  /* special */
+  ARGPARSE_OPT_END,
+  ARGPARSE_OPT_GROUP,
+  /* options with no arguments */
+  ARGPARSE_OPT_BOOLEAN,
+  ARGPARSE_OPT_BIT,
+  /* options with arguments (optional or required) */
+  ARGPARSE_OPT_INTEGER,
+  ARGPARSE_OPT_FLOAT,
+  ARGPARSE_OPT_STRING,
+};
+
+enum argparse_option_flags {
+  OPT_NONEG = 1, /* disable negation */
+};
+
+/**
+ *  argparse option
+ *
+ *  `type`:
+ *    holds the type of the option, you must have an ARGPARSE_OPT_END last in
+ * your array.
+ *
+ *  `short_name`:
+ *    the character to use as a short option name, '\0' if none.
+ *
+ *  `long_name`:
+ *    the long option name, without the leading dash, NULL if none.
+ *
+ *  `value`:
+ *    stores pointer to the value to be filled.
+ *
+ *  `help`:
+ *    the short help message associated to what the option does.
+ *    Must never be NULL (except for ARGPARSE_OPT_END).
+ *
+ *  `callback`:
+ *    function is called when corresponding argument is parsed.
+ *
+ *  `data`:
+ *    associated data. Callbacks can use it like they want.
+ *
+ *  `flags`:
+ *    option flags.
+ */
+struct argparse_option {
+  enum argparse_option_type type;
+  const char short_name;
+  const char *long_name;
+  void *value;
+  const char *help;
+  argparse_callback *callback;
+  intptr_t data;
+  int flags;
+};
+
+/**
+ * argparse parser state: user-supplied configuration plus the internal
+ * parsing context.
+ */
+struct argparse {
+  // user supplied
+  const struct argparse_option *options;
+  const char *const *usages;
+  int flags;
+  const char *description;  // a description after usage
+  const char *epilog;       // a description at the end
+  // internal context
+  int argc;
+  const char **argv;
+  const char **out;
+  int cpidx;
+  const char *optvalue;  // current option value
+};
+
+// built-in callbacks
+int argparse_help_cb(struct argparse *self,
+                     const struct argparse_option *option);
+
+// built-in option macros
+#define OPT_END() \
+  { ARGPARSE_OPT_END, 0, NULL, NULL, 0, NULL, 0, 0 }
+#define OPT_BOOLEAN(...) \
+  { ARGPARSE_OPT_BOOLEAN, __VA_ARGS__ }
+#define OPT_BIT(...) \
+  { ARGPARSE_OPT_BIT, __VA_ARGS__ }
+#define OPT_INTEGER(...) \
+  { ARGPARSE_OPT_INTEGER, __VA_ARGS__ }
+#define OPT_FLOAT(...) \
+  { ARGPARSE_OPT_FLOAT, __VA_ARGS__ }
+#define OPT_STRING(...) \
+  { ARGPARSE_OPT_STRING, __VA_ARGS__ }
+#define OPT_GROUP(h) \
+  { ARGPARSE_OPT_GROUP, 0, NULL, NULL, h, NULL, 0, 0 }
+#define OPT_HELP()                                                  \
+  OPT_BOOLEAN('h', "help", NULL, "show this help message and exit", \
+              argparse_help_cb, 0, 0)
+
+int argparse_init(struct argparse *self, struct argparse_option *options,
+                  const char *const *usages, int flags);
+void argparse_describe(struct argparse *self, const char *description,
+                       const char *epilog);
+int argparse_parse(struct argparse *self, int argc, const char **argv);
+void argparse_usage(struct argparse *self);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/argparse/tap-functions b/argparse/tap-functions
new file mode 100644
index 0000000000000000000000000000000000000000..84f700e644c0c09246ca747ce5c22cc884dfff6c
--- /dev/null
+++ b/argparse/tap-functions
@@ -0,0 +1,445 @@
+#!/bin/bash 
+
+
+_version='1.02'
+	
+_plan_set=0
+_no_plan=0
+_skip_all=0
+_test_died=0
+_expected_tests=0 
+_executed_tests=0 
+_failed_tests=0
+TODO=
+
+
+usage(){
+	# Print the library's self-documentation and exit 0. The quoted
+	# 'USAGE' delimiter keeps the heredoc verbatim (no expansion).
+	cat <<'USAGE'
+tap-functions: A TAP-producing BASH library
+
+PLAN:
+  plan_no_plan
+  plan_skip_all [REASON]
+  plan_tests NB_TESTS
+
+TEST:
+  ok RESULT [NAME]
+  okx COMMAND
+  is RESULT EXPECTED [NAME]
+  isnt RESULT EXPECTED [NAME]
+  like RESULT PATTERN [NAME]
+  unlike RESULT PATTERN [NAME]
+  pass [NAME]
+  fail [NAME]
+
+SKIP:
+  skip [CONDITION] [REASON] [NB_TESTS=1]
+
+  skip $feature_not_present "feature not present" 2 || {
+      is $a "a"
+      is $b "b"
+  }
+
+TODO:
+  Specify TODO mode by setting $TODO:
+    TODO="not implemented yet"
+    ok $result "some not implemented test"
+    unset TODO
+
+OTHER:
+  diag MSG
+
+EXAMPLE:
+  #!/bin/bash
+
+  . tap-functions
+
+  plan_tests 7
+
+  me=$USER
+  is $USER $me "I am myself"
+  like $HOME $me "My home is mine"
+  like "`id`" $me "My id matches myself"
+
+  /bin/ls $HOME 1>&2
+  ok $? "/bin/ls $HOME"
+  # Same thing using okx shortcut
+  okx /bin/ls $HOME
+
+  [[ "`id -u`" != "0" ]]
+  i_am_not_root=$?
+  skip $i_am_not_root "Must be root" || {
+    okx ls /root
+  }
+
+  TODO="figure out how to become root..."
+  okx [ "$HOME" == "/root" ]
+  unset TODO
+USAGE
+	exit
+}
+
+opt=
+set_u=
+# Parse the library's own flags: '-u' makes the sourcing script run with
+# 'set -u' (error on uninitialized variables); anything else shows usage.
+# (Fixed: the loop previously matched on "$_opt", which is never set, and
+# declared the unrelated option string ":sx" while handling the 'u' arm.)
+while getopts ":u" opt ; do
+	case $opt in
+        u) set_u=1 ;;
+        *) usage ;;
+    esac
+done
+shift $(( OPTIND - 1 ))
+# Don't allow uninitialized variables if requested
+[[ -n "$set_u" ]] && set -u
+unset opt set_u
+
+# On shell exit, run _exit (which computes the final status and calls _cleanup)
+trap _exit EXIT
+
+
+
+plan_no_plan(){
+	# Declare that no test count is announced up front; _cleanup prints
+	# the plan line at exit instead.
+	# (Fixed: the guard previously dropped the _die call, so the error
+	# message itself was executed as a command.)
+	(( _plan_set != 0 )) && _die "You tried to plan twice!"
+
+	_plan_set=1
+	_no_plan=1
+
+	return 0
+}
+
+
+plan_skip_all(){
+	# Announce a zero-test plan ("1..0 # Skip REASON") and exit the run.
+	local reason=${1:-''}
+
+	(( _plan_set != 0 )) && _die "You tried to plan twice!"
+
+	_print_plan 0 "Skip $reason"
+
+	_skip_all=1
+	_plan_set=1
+	_exit 0
+
+	return 0
+}
+
+
+plan_tests(){
+	# Usage: plan_tests N -- announce that exactly N tests will run.
+	# Returns N (historical quirk of this library).
+	local tests=${1:?}
+
+	(( _plan_set != 0 )) && _die "You tried to plan twice!"
+	(( tests == 0 )) && _die "You said to run 0 tests!  You've got to run something."
+
+	_print_plan $tests
+	_expected_tests=$tests
+	_plan_set=1
+
+	return $tests
+}
+
+
+_print_plan(){
+	# Emit the TAP plan line "1..N", with an optional "# directive".
+	local tests=${1:?}
+	local directive=${2:-''}
+
+	local plan="1..$tests"
+	[[ -n "$directive" ]] && plan+=" # $directive"
+	printf '%s\n' "$plan"
+}
+
+
+pass(){
+	# Record an unconditional success.
+	local name=$1
+	ok 0 "$name"
+}
+
+
+fail(){
+	# Record an unconditional failure.
+	local name=$1
+	ok 1 "$name"
+}
+
+
+# This is the workhorse method that actually
+# prints the tests result.
+ok(){
+	# Usage: ok RESULT [NAME] -- print "ok"/"not ok" for test RESULT
+	# (0 == pass). Honours $TODO and emits a diagnostic backtrace on
+	# failure. Returns RESULT.
+	local result=${1:?}
+	local name=${2:-''}
+
+	(( _plan_set == 0 )) && _die "You tried to run a test without a plan!  Gotta have a plan."
+
+	_executed_tests=$(( $_executed_tests + 1 ))
+
+	# Purely numeric test names are ambiguous in TAP output -- warn.
+	if [[ -n "$name" ]] ; then
+		if _matches "$name" "^[0-9]+$" ; then
+			diag "    You named your test '$name'.  You shouldn't use numbers for your test names."
+			diag "    Very confusing."
+		fi
+	fi
+
+	if (( result != 0 )) ; then
+		echo -n "not "
+		_failed_tests=$(( _failed_tests + 1 ))
+	fi
+	echo -n "ok $_executed_tests"
+
+	if [[ -n "$name" ]] ; then
+		# Escape '#' so it is not taken as a TAP directive marker.
+		local ename=${name//\#/\\#}
+		echo -n " - $ename"
+	fi
+
+	# TODO tests are expected to fail, so their failures don't count.
+	if [[ -n "$TODO" ]] ; then
+		echo -n " # TODO $TODO" ;
+		if (( result != 0 )) ; then
+			_failed_tests=$(( _failed_tests - 1 ))
+		fi
+	fi
+
+	echo
+	if (( result != 0 )) ; then
+		local file='tap-functions'
+		local func=
+		local line=
+
+		# Walk up the call stack past this library's own frames so the
+		# diagnostic points at the user's script.
+		local i=0
+		local bt=$(caller $i)
+		while _matches "$bt" "tap-functions$" ; do
+			i=$(( $i + 1 ))
+			bt=$(caller $i)
+		done
+		local backtrace=
+		eval $(caller $i | (read line func file ; echo "backtrace=\"$file:$func() at line $line.\""))
+			
+		local t=
+		[[ -n "$TODO" ]] && t="(TODO) "
+
+		if [[ -n "$name" ]] ; then
+			diag "  Failed ${t}test '$name'"
+			diag "  in $backtrace"
+		else
+			diag "  Failed ${t}test in $backtrace"
+		fi
+	fi
+
+	return $result
+}
+
+
+okx(){
+	# Usage: okx COMMAND [ARGS...] -- run COMMAND, echo its output as
+	# diagnostics, and assert on its exit status (taken from PIPESTATUS,
+	# since the command is piped into the diag loop).
+	# NOTE(review): "$command" is run unquoted, so arguments containing
+	# whitespace will be re-split -- confirm callers only pass simple args.
+	local command="$@"
+
+	local line=
+	diag "Output of '$command':"
+	$command | while read line ; do
+		diag "$line"
+	done
+	ok ${PIPESTATUS[0]} "$command"
+}
+
+
+_equals(){
+	local result=${1:?}
+	local expected=${2:?}
+
+	if [[ "$result" == "$expected" ]] ; then
+		return 0
+	else 
+		return 1
+	fi
+}
+
+
+# Thanks to Aaron Kangas for the patch to allow regexp matching
+# under bash < 3.
+ _bash_major_version=${BASH_VERSION%%.*}
+_matches(){
+	# Succeed iff $1 matches the extended regex $2; both must be
+	# non-empty.
+	local result=${1:?}
+	local pattern=${2:?}
+
+	if [[ -z "$result" || -z "$pattern" ]] ; then
+		return 1
+	else
+		if (( _bash_major_version >= 3 )) ; then
+			# The pattern must be UNQUOTED here: since bash 3.2 a quoted
+			# right-hand side of =~ is matched as a literal string, which
+			# silently broke every regex assertion (like/unlike/ok).
+			eval '[[ "$result" =~ $pattern ]]'
+		else
+			echo "$result" | egrep -q "$pattern"
+		fi
+	fi
+}
+
+
+_is_diag(){
+	# Print the standard got/expected diagnostics for is/isnt failures.
+	local result=${1:?}
+	local expected=${2:?}
+
+	diag "         got: '$result'" 
+	diag "    expected: '$expected'"
+}
+
+
+is(){
+	# Assert that RESULT equals EXPECTED (string comparison).
+	local result=${1:?}
+	local expected=${2:?}
+	local name=${3:-''}
+
+	_equals "$result" "$expected"
+	(( $? == 0 ))
+	ok $? "$name"
+	local r=$?
+	(( r != 0 )) && _is_diag "$result" "$expected"
+	return $r 
+}
+
+
+isnt(){
+	# Assert that RESULT differs from EXPECTED (string comparison).
+	local result=${1:?}
+	local expected=${2:?}
+	local name=${3:-''}
+
+	_equals "$result" "$expected"
+	(( $? != 0 ))
+	ok $? "$name"
+	local r=$?
+	(( r != 0 )) && _is_diag "$result" "$expected"
+	return $r 
+}
+
+
+like(){
+	# Assert that RESULT matches the extended regex PATTERN.
+	local result=${1:?}
+	local pattern=${2:?}
+	local name=${3:-''}
+
+	_matches "$result" "$pattern"
+	(( $? == 0 ))
+	ok $? "$name"
+	local r=$?
+	(( r != 0 )) && diag "    '$result' doesn't match '$pattern'"
+	return $r
+}
+
+
+unlike(){
+	# Assert that RESULT does NOT match the extended regex PATTERN.
+	local result=${1:?}
+	local pattern=${2:?}
+	local name=${3:-''}
+
+	_matches "$result" "$pattern"
+	(( $? != 0 ))
+	ok $? "$name"
+	local r=$?
+	(( r != 0 )) && diag "    '$result' matches '$pattern'"
+	return $r
+}
+
+
+skip(){
+	# Usage: skip CONDITION [REASON] [NB_TESTS=1]
+	# When CONDITION is 0, emit NB_TESTS "ok ... # skip" lines and return
+	# 0, so the caller's `|| { ... }` block of real tests is skipped;
+	# otherwise return non-zero so those tests do run.
+	local condition=${1:?}
+	local reason=${2:-''}
+	local n=${3:-1}
+
+	if (( condition == 0 )) ; then
+		local i=
+		for (( i=0 ; i<$n ; i++ )) ; do
+			_executed_tests=$(( _executed_tests + 1 ))
+			echo "ok $_executed_tests # skip: $reason" 
+		done
+		return 0
+	else
+		# Propagates the failed (( ... )) status from above.
+		return
+	fi
+}
+
+
+diag(){
+	# Print MSG as a TAP diagnostic line ("# MSG").
+	# Always returns 1, so `... && diag ...` chains keep a failure status.
+	local msg=${1:?}
+
+	if [[ -n "$msg" ]] ; then
+		echo "# $msg"
+	fi
+	
+	return 1
+}
+
+	
+_die(){
+	# Print REASON to stderr, mark the run as died, and exit 255.
+	local reason=${1:-'<unspecified error>'}
+
+	echo "$reason" >&2
+	_test_died=1
+	_exit 255
+}
+
+
+BAIL_OUT(){
+	# Emit a TAP "Bail out!" line (abort the whole run) and exit 255.
+	local reason=${1:-''}
+
+	echo "Bail out! $reason" >&2
+	_exit 255
+}
+
+
+_cleanup(){
+	# Exit-time bookkeeping: print a trailing plan for no-plan runs and
+	# diagnose mismatches between planned, executed and failed tests.
+	# Returns non-zero when more tests ran than were planned.
+	local rc=0
+
+	if (( _plan_set == 0 )) ; then
+		diag "Looks like your test died before it could output anything."
+		return $rc
+	fi
+
+	if (( _test_died != 0 )) ; then
+		diag "Looks like your test died just after $_executed_tests."
+		return $rc
+	fi
+
+	if (( _skip_all == 0 && _no_plan != 0 )) ; then
+		_print_plan $_executed_tests
+	fi
+
+	local s=
+	if (( _no_plan == 0 && _expected_tests < _executed_tests )) ; then
+		s= ; (( _expected_tests > 1 )) && s=s
+		local extra=$(( _executed_tests - _expected_tests ))
+		diag "Looks like you planned $_expected_tests test$s but ran $extra extra."
+		# NOTE(review): -1 is out of range for a shell return status
+		# (bash maps it to 255) -- confirm this is intended.
+		rc=-1 ;
+	fi
+
+	if (( _no_plan == 0 && _expected_tests > _executed_tests )) ; then
+		s= ; (( _expected_tests > 1 )) && s=s
+		diag "Looks like you planned $_expected_tests test$s but only ran $_executed_tests."
+	fi
+
+	if (( _failed_tests > 0 )) ; then
+		s= ; (( _failed_tests > 1 )) && s=s
+		diag "Looks like you failed $_failed_tests test$s of $_executed_tests."
+	fi
+
+	return $rc
+}
+
+
+_exit_status(){
+	# Compute the script's exit status: the failure count, plus any
+	# shortfall between planned and executed tests.
+	if (( _no_plan != 0 || _plan_set == 0 )) ; then
+		return $_failed_tests
+	fi
+
+	if (( _expected_tests < _executed_tests )) ; then
+		return $(( _executed_tests - _expected_tests  ))
+	fi
+
+	return $(( _failed_tests + ( _expected_tests - _executed_tests )))
+}
+
+
+_exit(){
+	# EXIT-trap handler: determine the final status (unless one was
+	# passed in), run _cleanup, then really exit without re-firing the
+	# trap.
+	local rc=${1:-''}
+	if [[ -z "$rc" ]] ; then
+		_exit_status
+		rc=$?
+	fi
+
+	_cleanup
+	local alt_rc=$?
+	(( alt_rc != 0 )) && rc=$alt_rc
+	trap - EXIT
+	exit $rc
+}
+
diff --git a/argparse/test.sh b/argparse/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..192357d3cc43947593b1db50f2ff46b3092340e6
--- /dev/null
+++ b/argparse/test.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. tap-functions
+plan_no_plan
+
+is "$(./test_argparse -f --path=/path/to/file a 2>&1)" 'force: 1
+path: /path/to/file
+argc: 1
+argv[0]: a'
+
+is "$(./test_argparse -f -f --force --no-force 2>&1)" 'force: 2'
+
+is "$(./test_argparse -i 2>&1)" 'error: option `-i` requires a value'
+
+is "$(./test_argparse -i 2 2>&1)" 'int_num: 2'
+
+is "$(./test_argparse -i2 2>&1)" 'int_num: 2'
+
+is "$(./test_argparse -ia 2>&1)" 'error: option `-i` expects an integer value'
+
+is "$(./test_argparse -i 0xFFFFFFFFFFFFFFFFF 2>&1)" \
+   'error: option `-i` Numerical result out of range'
+
+is "$(./test_argparse -s 2.4 2>&1)" 'flt_num: 2.4'
+
+is "$(./test_argparse -s2.4 2>&1)" 'flt_num: 2.4'
+
+is "$(./test_argparse -sa 2>&1)" 'error: option `-s` expects a numerical value'
+
+is "$(./test_argparse -s 1e999 2>&1)" \
+   'error: option `-s` Numerical result out of range'
+
+is "$(./test_argparse -f -- do -f -h 2>&1)" 'force: 1
+argc: 3
+argv[0]: do
+argv[1]: -f
+argv[2]: -h'
+
+is "$(./test_argparse -tf 2>&1)" 'force: 1
+test: 1'
+
+is "$(./test_argparse --read --write 2>&1)" 'perms: 3'
+
+is "$(./test_argparse -h)" 'Usage: test_argparse [options] [[--] args]
+   or: test_argparse [options]
+
+A brief description of what the program does and how it works.
+
+    -h, --help            show this help message and exit
+
+Basic options
+    -f, --force           force to do
+    -t, --test            test only
+    -p, --path=<str>      path to read
+    -i, --int=<int>       selected integer
+    -s, --float=<flt>     selected float
+
+Bits options
+    --read                read perm
+    --write               write perm
+    --exec                exec perm
+
+Additional description of the program after the description of the arguments.'
diff --git a/argparse/test_argparse.c b/argparse/test_argparse.c
new file mode 100644
index 0000000000000000000000000000000000000000..5f411833aafa603d085258f11b8bbb35ff1c6d39
--- /dev/null
+++ b/argparse/test_argparse.c
@@ -0,0 +1,80 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "argparse.h"
+
+static const char *const usages[] = {
+    "test_argparse [options] [[--] args]",
+    "test_argparse [options]",
+    NULL,
+};
+
+/* Permission bits intended for OPT_BIT options. */
+#define PERM_READ (1 << 0)
+#define PERM_WRITE (1 << 1)
+#define PERM_EXEC (1 << 2)
+
+struct stuff {
+  const char *path[10]; /* values collected by the --path callback */
+  int npath;            /* number of used entries in path[] */
+};
+
+/* --path callback: echo the parsed value and append it to the
+ * 'struct stuff' passed via opt->data.
+ * NOTE(review): no bounds check against the 10-slot path[] array. */
+static int callback(struct argparse *self, const struct argparse_option *opt) {
+  printf("Called back... %s\n", *(char **)opt->value);
+  struct stuff *data = (struct stuff *)opt->data;
+  data->path[data->npath] = *(char **)opt->value;
+  data->npath++;
+  return 1;
+}
+
+int main(int argc, const char **argv) {
+  /* Exercise the argparse library: parse the command line into the
+   * locals below, then echo every value that was set followed by the
+   * leftover arguments. (Removed the unused local 'npath'.) */
+  int force = 0;
+  int self_gravity = 0;
+  int int_num = 0;
+  float flt_num = 0.f;
+  struct stuff data;
+  data.npath = 0;
+  data.path[0] = NULL;
+  const char *buffer;
+  int perms = 0;
+
+  struct argparse_option options[] = {
+      OPT_HELP(),
+      OPT_GROUP("Basic options"),
+      OPT_BOOLEAN('f', "force", &force, "force to do", NULL, 0, 0),
+      OPT_BOOLEAN(0, "self-gravity", &self_gravity, "use self gravity", NULL, 0,
+                  0),
+      OPT_STRING('P', "path", &buffer, "path to read", &callback,
+                 (intptr_t)&data, 0),
+      OPT_INTEGER('i', "int", &int_num, "selected integer", NULL, 0, 0),
+      OPT_FLOAT('s', "float", &flt_num, "selected float", NULL, 0, 0),
+      OPT_END(),
+  };
+
+  struct argparse argparse;
+  argparse_init(&argparse, options, usages, 0);
+  argparse_describe(
+      &argparse,
+      "\nA brief description of what the program does and how it works.",
+      "\nAdditional description of the program after the description of the "
+      "arguments.");
+  /* argparse_parse() compacts argv in place and returns the number of
+   * leftover (non-option) arguments. */
+  argc = argparse_parse(&argparse, argc, argv);
+  if (force != 0) printf("force: %d\n", force);
+  if (self_gravity != 0) printf("self_gravity: %d\n", self_gravity);
+  if (data.npath > 0) {
+    for (int i = 0; i < data.npath; i++) printf("path: %s\n", data.path[i]);
+  }
+  if (int_num != 0) printf("int_num: %d\n", int_num);
+  if (flt_num != 0) printf("flt_num: %g\n", flt_num);
+  if (argc != 0) {
+    printf("argc: %d\n", argc);
+    for (int i = 0; i < argc; i++) {
+      printf("argv[%d]: %s\n", i, argv[i]);
+    }
+  }
+  /* NOTE(review): 'perms' is never set -- no OPT_BIT options are defined
+   * here, although test.sh still expects --read/--write/--exec and -t.
+   * Confirm whether this option table or the test script is stale. */
+  if (perms) {
+    printf("perms: %d\n", perms);
+  }
+  return 0;
+}
diff --git a/configure.ac b/configure.ac
index cd96da1f3aeee70de50deec3e5b642eb34345979..c7f1df31d6148b568c22e6ed5ebc33fc9ff73abf 100644
--- a/configure.ac
+++ b/configure.ac
@@ -16,9 +16,25 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Init the project.
-AC_INIT([SWIFT],[0.7.0],[https://gitlab.cosma.dur.ac.uk/swift/swiftsim])
+AC_INIT([SWIFT],[0.8.0],[https://gitlab.cosma.dur.ac.uk/swift/swiftsim])
 swift_config_flags="$*"
 
+#  We want to stop when given unrecognised options. No subdirs so this is safe.
+enable_option_checking=${enable_option_checking:-fatal}
+if test -n "$ac_unrecognized_opts"; then
+    case $enable_option_checking in
+        no)
+        ;;
+        fatal)
+            { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2
+              { (exit 1); exit 1; }; }
+        ;;
+        *)
+            $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2
+        ;;
+    esac
+fi
+
 AC_COPYRIGHT
 AC_CONFIG_SRCDIR([src/space.c])
 AC_CONFIG_AUX_DIR([.])
@@ -54,6 +70,19 @@ AX_COMPILER_VERSION
 #  Restrict support.
 AC_C_RESTRICT
 
+# logger
+AC_ARG_ENABLE([logger],
+	[AS_HELP_STRING([--enable-logger],
+		[enable the particle logger]
+	)],
+	[with_logger="${enableval}"],
+	[with_logger="no"]
+)
+
+if test "$with_logger" = "yes"; then
+   AC_DEFINE([WITH_LOGGER], 1, [logger enabled])
+fi
+
 # Interprocedural optimization support. Needs special handling for linking and
 # archiving as well as compilation with Intels, needs to be done before
 # libtool is configured (to use correct LD).
@@ -75,9 +104,18 @@ if test "$enable_ipo" = "yes"; then
    elif test "$ax_cv_c_compiler_vendor" = "gnu"; then
       CFLAGS="$CFLAGS -flto"
       LDFLAGS="$LDFLAGS -flto"
+      AX_COMPARE_VERSION($ax_cv_c_compiler_version, [ge], [5.0.0],
+                          [
+      : ${AR="gcc-ar"}
+      : ${RANLIB="gcc-ranlib"}
+                          ], [:] )
       AC_MSG_RESULT([added GCC interprocedural optimization support])
    elif test "$ax_cv_c_compiler_vendor" = "clang"; then
-      CFLAGS="$CFLAGS -emit-llvm"
+      CFLAGS="$CFLAGS -flto -fuse-ld=gold"
+      LDFLAGS="$LDFLAGS -XCClinker -fuse-ld=gold"
+      : ${AR="llvm-ar"}
+      : ${LD="ld.gold"}
+      : ${RANLIB="llvm-ranlib"}
       AC_MSG_RESULT([added LLVM interprocedural optimization support])
    else
       AC_MSG_WARN([Compiler does not support interprocedural optimization])
@@ -186,7 +224,7 @@ fi
 # Check if task debugging is on.
 AC_ARG_ENABLE([task-debugging],
    [AS_HELP_STRING([--enable-task-debugging],
-     [Store task timing information and generate task dump files @<:@yes/no@:>@]
+     [Store extra information for generating task dump files @<:@yes/no@:>@]
    )],
    [enable_task_debugging="$enableval"],
    [enable_task_debugging="no"]
@@ -296,6 +334,9 @@ elif test "$no_gravity_below_id" != "no"; then
    AC_DEFINE_UNQUOTED([SWIFT_NO_GRAVITY_BELOW_ID], [$enableval] ,[Particles with smaller ID than this will have zero gravity forces])
 fi
 
+# Check whether we have any of the ARM v8.1 tick timers
+AX_ASM_ARM_PMCCNTR
+AX_ASM_ARM_CNTVCT
 # See if we want memuse reporting.
 AC_ARG_ENABLE([memuse-reports],
    [AS_HELP_STRING([--enable-memuse-reports],
@@ -334,6 +375,16 @@ AC_ARG_ENABLE([vec],
    [enable_vec="yes"]
 )
 
+#  Disable hand written vectorisation. Slightly odd implementation as want
+# to describe as --disable-hand-vec, but macro is enable (there is no enable action).
+AC_ARG_ENABLE([hand-vec],
+   [AS_HELP_STRING([--disable-hand-vec],
+     [Disable intrinsic vectorization]
+   )],
+   [enable_hand_vec="$enableval"],
+   [enable_hand_vec="yes"]
+)
+
 HAVEVECTORIZATION=0
 
 if test "$enable_opt" = "yes" ; then
@@ -357,7 +408,6 @@ if test "$enable_opt" = "yes" ; then
        fi
    fi
 
-
    if test "$enable_vec" = "no"; then
       if test "$ax_cv_c_compiler_vendor" = "intel"; then
       	 CFLAGS="$CFLAGS -no-vec -no-simd"
@@ -371,8 +421,8 @@ if test "$enable_opt" = "yes" ; then
       else
          AC_MSG_WARN([Do not know how to disable vectorization for this compiler])
       fi
-   else
-      AC_DEFINE([WITH_VECTORIZATION],1,[Enable vectorization])
+   elif test "$enable_hand_vec" = "yes"; then
+      AC_DEFINE([WITH_VECTORIZATION],1,[Enable hand-written vectorization])
       HAVEVECTORIZATION=1
    fi
 fi
@@ -508,18 +558,20 @@ AC_CHECK_LIB(pthread, posix_fallocate,
 	     AC_DEFINE([HAVE_POSIX_FALLOCATE], [1], [The posix library implements file allocation functions.]),
 	     AC_MSG_WARN(POSIX implementation does not have file allocation functions.))
 
-# Check for METIS. Note AX_LIB_METIS exists, but cannot be configured
-# to be default off (i.e. given no option it tries to locate METIS), so we
-# don't use that.
+# Check for METIS.
 have_metis="no"
 AC_ARG_WITH([metis],
     [AS_HELP_STRING([--with-metis=PATH],
-       [root directory where metis is installed @<:@yes/no@:>@]
+       [root directory where METIS is installed @<:@yes/no@:>@]
     )],
     [with_metis="$withval"],
     [with_metis="no"]
 )
+
+METIS_LIBS=""
 if test "x$with_metis" != "xno"; then
+
+# Check if we have METIS.
    if test "x$with_metis" != "xyes" -a "x$with_metis" != "x"; then
       METIS_LIBS="-L$with_metis/lib -lmetis"
       METIS_INCS="-I$with_metis/include"
@@ -527,15 +579,67 @@ if test "x$with_metis" != "xno"; then
       METIS_LIBS="-lmetis"
       METIS_INCS=""
    fi
-   have_metis="yes"
-   AC_CHECK_LIB([metis],[METIS_PartGraphKway],
-      AC_DEFINE([HAVE_METIS],1,[The metis library appears to be present.]),
-      AC_MSG_ERROR(something is wrong with the metis library!),$METIS_LIBS)
+   AC_CHECK_LIB([metis],[METIS_PartGraphKway], [have_metis="yes"],
+                [have_metis="no"], $METIS_LIBS)
+   if test "$have_metis" == "yes"; then
+      AC_DEFINE([HAVE_METIS],1,[The METIS library is present.])
+   else
+      AC_MSG_ERROR("Failed to find a METIS library")
+   fi
 fi
+
 AC_SUBST([METIS_LIBS])
 AC_SUBST([METIS_INCS])
 AM_CONDITIONAL([HAVEMETIS],[test -n "$METIS_LIBS"])
 
+# Check for ParMETIS note we can have both as ParMETIS uses METIS.
+have_parmetis="no"
+AC_ARG_WITH([parmetis],
+    [AS_HELP_STRING([--with-parmetis=PATH],
+       [root directory where ParMETIS is installed @<:@yes/no@:>@]
+    )],
+    [with_parmetis="$withval"],
+    [with_parmetis="no"]
+)
+
+if test "x$with_parmetis" != "xno"; then
+
+# Check if we have ParMETIS.
+   if test "x$with_parmetis" != "xyes" -a "x$with_parmetis" != "x"; then
+      PARMETIS_LIBS="-L$with_parmetis/lib -lparmetis"
+      PARMETIS_INCS="-I$with_parmetis/include"
+   else
+      PARMETIS_LIBS="-lparmetis"
+      PARMETIS_INCS=""
+   fi
+   AC_CHECK_LIB([parmetis],[ParMETIS_V3_RefineKway], [have_parmetis="yes"],
+                [have_parmetis="no"], $PARMETIS_LIBS)
+   if test "$have_parmetis" == "no"; then
+
+# A build may use an external METIS library, check for that.
+
+      if test "x$with_parmetis" != "xyes" -a "x$with_parmetis" != "x"; then
+         PARMETIS_LIBS="-L$with_parmetis/lib -lparmetis -lmetis"
+         PARMETIS_INCS="-I$with_parmetis/include"
+      else
+         PARMETIS_LIBS="-lparmetis -lmetis"
+         PARMETIS_INCS=""
+      fi
+      AC_CHECK_LIB([parmetis],[ParMETIS_V3_RefineKway], [have_parmetis="yes"],
+                   [have_parmetis="no"], [$METIS_LIBS $PARMETIS_LIBS])
+
+   fi
+   if test "$have_parmetis" == "yes"; then
+      AC_DEFINE([HAVE_PARMETIS],1,[The ParMETIS library is present.])
+   else
+      AC_MSG_ERROR("Failed to find a ParMETIS library")
+   fi
+fi
+
+AC_SUBST([PARMETIS_LIBS])
+AC_SUBST([PARMETIS_INCS])
+AM_CONDITIONAL([HAVEPARMETIS],[test -n "$PARMETIS_LIBS"])
+
 # METIS fixed width integer printing can require this, so define. Only needed
 # for some non C99 compilers, i.e. C++ pre C++11.
 AH_VERBATIM([__STDC_FORMAT_MACROS],
@@ -546,7 +650,8 @@ AH_VERBATIM([__STDC_FORMAT_MACROS],
 
 # Check for FFTW. We test for this in the standard directories by default,
 # and only disable if using --with-fftw=no or --without-fftw. When a value
-# is given GSL must be found.
+# is given FFTW must be found.
+# If FFTW is found, we check whether this is the threaded version.
 have_fftw="no"
 AC_ARG_WITH([fftw],
     [AS_HELP_STRING([--with-fftw=PATH],
@@ -556,6 +661,8 @@ AC_ARG_WITH([fftw],
     [with_fftw="test"]
 )
 if test "x$with_fftw" != "xno"; then
+
+   # Was FFTW's location specifically given?
    if test "x$with_fftw" != "xyes" -a "x$with_fftw" != "xtest" -a "x$with_fftw" != "x"; then
       FFTW_LIBS="-L$with_fftw/lib -lfftw3"
       FFTW_INCS="-I$with_fftw/include"
@@ -563,22 +670,116 @@ if test "x$with_fftw" != "xno"; then
       FFTW_LIBS="-lfftw3"
       FFTW_INCS=""
    fi
+
    #  FFTW is not specified, so just check if we have it.
    if test "x$with_fftw" = "xtest"; then
       AC_CHECK_LIB([fftw3],[fftw_malloc],[have_fftw="yes"],[have_fftw="no"],$FFTW_LIBS)
       if test "x$have_fftw" != "xno"; then
       	 AC_DEFINE([HAVE_FFTW],1,[The FFTW library appears to be present.])
       fi
+   # FFTW was specified, check that it was a valid location.
    else
       AC_CHECK_LIB([fftw3],[fftw_malloc],
          AC_DEFINE([HAVE_FFTW],1,[The FFTW library appears to be present.]),
          AC_MSG_ERROR(something is wrong with the FFTW library!), $FFTW_LIBS)
       have_fftw="yes"
    fi
+
+   # FFTW was not found, so clear the flags.
    if test "$have_fftw" = "no"; then
       FFTW_LIBS=""
       FFTW_INCS=""
    fi
+
+   # Now, check whether we have the threaded version of FFTW
+   if test "x$have_fftw" = "xyes"; then
+
+      # Was FFTW's location specifically given?
+      if test "x$with_fftw" != "xyes" -a "x$with_fftw" != "xtest" -a "x$with_fftw" != "x"; then
+        FFTW_THREADED_LIBS="-L$with_fftw/lib -lfftw3_threads -lfftw3"
+        FFTW_THREADED_INCS="-I$with_fftw/include"
+      else
+        FFTW_THREADED_LIBS="-lfftw3_threads -lfftw3"
+        FFTW_THREADED_INCS=""
+      fi
+
+      # Verify that the library is threaded
+      AC_CHECK_LIB([fftw3],[fftw_init_threads],[have_threaded_fftw="yes"],
+		   [have_threaded_fftw="no"], $FFTW_THREADED_LIBS)
+
+      # If found, update things
+      if test "x$have_threaded_fftw" = "xyes"; then
+         AC_DEFINE([HAVE_THREADED_FFTW],1,[The threaded FFTW library appears to be present.])
+         FFTW_LIBS=$FFTW_THREADED_LIBS
+         FFTW_INCS=$FFTW_THREADED_INCS
+	 have_fftw="yes - threaded"
+      fi
+   fi
+fi
+
+AC_ARG_WITH([arm-fftw],
+    [AS_HELP_STRING([--with-arm-fftw=PATH],
+      [root directory where arm fft library is installed @<:@yes/no@:>@]
+    )],
+    [with_arm_fftw="$withval"],
+    [with_arm_fftw=no]
+)
+if test "x$with_arm_fftw" != "xno"; then
+
+   # Was FFTW's location specifically given?
+   if test "x$with_arm_fftw" != "xyes" -a "x$with_arm_fftw" != "xtest" -a "x$with_arm_fftw" != "x"; then
+      FFTW_LIBS="-L$with_arm_fftw/lib -larmpl_lp64"
+      FFTW_INCS="-I$with_arm_fftw/include"
+   else
+      FFTW_LIBS="-larmpl_lp64"
+      FFTW_INCS=""
+   fi
+
+   #  FFTW is not specified, so just check if we have it.
+   if test "x$with_arm_fftw" = "xtest"; then
+      AC_CHECK_LIB([armpl_lp64],[fftw_malloc],[have_arm_fftw="yes"],[have_arm_fftw="no"],$FFTW_LIBS)
+      if test "x$have_arm_fftw" != "xno"; then
+      	 AC_DEFINE([HAVE_FFTW],1,[The FFTW library appears to be present.])
+	 have_fftw="yes - ARM"
+      fi
+   # FFTW was specified, check that it was a valid location.
+   else
+      AC_CHECK_LIB([armpl_lp64],[fftw_malloc],
+         AC_DEFINE([HAVE_FFTW],1,[The FFTW library appears to be present.]),
+         AC_MSG_ERROR(something is wrong with the FFTW library!), $FFTW_LIBS)
+      have_fftw="yes - ARM"
+   fi
+
+   # The ARM FFTW library was not found, so clear the flags.
+   if test "$have_arm_fftw" = "no"; then
+      FFTW_LIBS=""
+      FFTW_INCS=""
+   fi
+
+   # Now, check whether we have the threaded version of FFTW
+   if test "x$have_arm_fftw" = "xyes"; then
+
+      # Was FFTW's location specifically given?
+      if test "x$with_arm_fftw" != "xyes" -a "x$with_arm_fftw" != "xtest" -a "x$with_arm_fftw" != "x"; then
+        FFTW_THREADED_LIBS="-L$with_arm_fftw/lib -larmpl_lp64_threads -larmpl_lp64"
+        FFTW_THREADED_INCS="-I$with_arm_fftw/include"
+      else
+        FFTW_THREADED_LIBS="-larmpl_lp64_threads -larmpl_lp64"
+        FFTW_THREADED_INCS=""
+      fi
+
+      # Verify that the library is threaded
+      AC_CHECK_LIB([armpl_lp64],[fftw_init_threads],[have_threaded_fftw="yes"],
+                  [have_threaded_fftw="no"], $FFTW_THREADED_LIBS)
+
+      # If found, update things
+      if test "x$have_threaded_fftw" = "xyes"; then
+         AC_DEFINE([HAVE_THREADED_FFTW],1,[The threaded FFTW library appears to be present.])
+         FFTW_LIBS=$FFTW_THREADED_LIBS
+         FFTW_INCS=$FFTW_THREADED_INCS
+         have_fftw="yes - ARM - threaded"
+      fi
+   fi
 fi
 AC_SUBST([FFTW_LIBS])
 AC_SUBST([FFTW_INCS])
@@ -779,7 +980,8 @@ if test "$with_hdf5" = "yes"; then
         AC_MSG_CHECKING([for HDF5 parallel support])
 
 	# Check if the library is capable, the header should define H5_HAVE_PARALLEL.
-
+        old_CPPFLAGS="$CPPFLAGS"
+        CPPFLAGS="$CPPFLAGS $HDF5_CPPFLAGS"
         AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
         #include "hdf5.h"
         #ifndef H5_HAVE_PARALLEL
@@ -791,10 +993,48 @@ if test "$with_hdf5" = "yes"; then
             AC_DEFINE([HAVE_PARALLEL_HDF5],1,[HDF5 library supports parallel access])
         fi
         AC_MSG_RESULT($parallel)
+        CPPFLAGS="$old_CPPFLAGS"
     fi
 fi
 AM_CONDITIONAL([HAVEPARALLELHDF5],[test "$have_parallel_hdf5" = "yes"])
 
+# Check for grackle.
+have_grackle="no"
+AC_ARG_WITH([grackle],
+    [AS_HELP_STRING([--with-grackle=PATH],
+       [root directory where grackle is installed @<:@yes/no@:>@]
+    )],
+    [with_grackle="$withval"],
+    [with_grackle="no"]
+)
+if test "x$with_grackle" != "xno"; then
+   AC_PROG_FC
+   AC_FC_LIBRARY_LDFLAGS
+   if test "x$with_grackle" != "xyes" -a "x$with_grackle" != "x"; then
+      GRACKLE_LIBS="-L$with_grackle/lib -lgrackle"
+      GRACKLE_INCS="-I$with_grackle/include"
+   else
+      GRACKLE_LIBS="-lgrackle"
+      GRACKLE_INCS=""
+   fi
+
+   have_grackle="yes"
+
+   AC_MSG_NOTICE([GRACKLE_LIBS: $GRACKLE_LIBS])
+
+   AC_CHECK_LIB(
+      [grackle],
+      [initialize_chemistry_data],
+      [AC_DEFINE([HAVE_GRACKLE],1,[The GRACKLE library appears to be present.])
+        AC_DEFINE([CONFIG_BFLOAT_8],1,[Use doubles in grackle])
+      ],
+      [AC_MSG_ERROR(Cannot find grackle library!)],
+      [$GRACKLE_LIBS])
+fi
+AC_SUBST([GRACKLE_LIBS])
+AC_SUBST([GRACKLE_INCS])
+AM_CONDITIONAL([HAVEGRACKLE],[test -n "$GRACKLE_LIBS"])
+
 # Check for VELOCIraptor.
 have_velociraptor="no"
 AC_ARG_WITH([velociraptor],
@@ -808,7 +1048,7 @@ if test "x$with_velociraptor" != "xno"; then
    AC_PROG_FC
    AC_FC_LIBRARY_LDFLAGS
    if test "x$with_velociraptor" != "xyes" -a "x$with_velociraptor" != "x"; then
-      VELOCIRAPTOR_LIBS="-L$with_velociraptor -lstf -lstdc++ -lhdf5_cpp"
+      VELOCIRAPTOR_LIBS="-L$with_velociraptor -lvelociraptor -lmpi -lstdc++ -lhdf5_cpp"
       CFLAGS="$CFLAGS -fopenmp"
    else
       VELOCIRAPTOR_LIBS=""
@@ -817,7 +1057,7 @@ if test "x$with_velociraptor" != "xno"; then
    have_velociraptor="yes"
 
    AC_CHECK_LIB(
-      [stf],
+      [velociraptor],
       [InitVelociraptor],
       [AC_DEFINE([HAVE_VELOCIRAPTOR],1,[The VELOCIraptor library appears to be present.])],
       [AC_MSG_ERROR(Cannot find VELOCIraptor library at $with_velociraptor)],
@@ -827,6 +1067,22 @@ fi
 AC_SUBST([VELOCIRAPTOR_LIBS])
 AM_CONDITIONAL([HAVEVELOCIRAPTOR],[test -n "$VELOCIRAPTOR_LIBS"])
 
+# Check for dummy VELOCIraptor.
+AC_ARG_ENABLE([dummy-velociraptor],
+    [AS_HELP_STRING([--enable-dummy-velociraptor],
+       [Enable dummy velociraptor compilation @<:@yes/no@:>@]
+    )],
+    [enable_dummy_velociraptor="$enableval"],
+    [enable_dummy_velociraptor="no"]
+)
+
+if test "$enable_dummy_velociraptor" = "yes"; then
+  have_velociraptor="yes"
+
+  AC_DEFINE(HAVE_VELOCIRAPTOR,1,[The VELOCIraptor library appears to be present.])
+  AC_DEFINE(HAVE_DUMMY_VELOCIRAPTOR,1,[The dummy VELOCIraptor library is present.])
+fi
+
 # Check for floating-point execeptions
 AC_CHECK_FUNC(feenableexcept, AC_DEFINE([HAVE_FE_ENABLE_EXCEPT],[1],
     [Defined if the floating-point exception can be enabled using non-standard GNU functions.]))
@@ -837,20 +1093,67 @@ AC_CHECK_FUNC(pthread_setaffinity_np, AC_DEFINE([HAVE_SETAFFINITY],[1],
 AM_CONDITIONAL(HAVESETAFFINITY,
     [test "$ac_cv_func_pthread_setaffinity_np" = "yes"])
 
+# If available check for NUMA as well. There is a problem with the headers of
+# this library, mainly that they do not pass the strict prototypes check when
+# installed outside of the system directories. So we actually do this check
+# in two phases. The basic ones first (before strict-prototypes is added to CFLAGS).
 have_numa="no"
-if test "$ac_cv_func_pthread_setaffinity_np" = "yes"; then
-  # Check for libnuma.
-  AC_CHECK_HEADER([numa.h])
-  if test "$ac_cv_header_numa_h" = "yes"; then
-    AC_CHECK_LIB([numa], [numa_available])
-    have_numa="yes"
-  fi
-fi
+AC_ARG_WITH([numa],
+    [AS_HELP_STRING([--with-numa=PATH],
+       [Directory where the NUMA library exists @<:@yes/no@:>@]
+    )],
+    [with_numa="$withval"],
+    [with_numa="yes"]
+)
+if test "$ac_cv_func_pthread_setaffinity_np" = "yes" -a "x$with_numa" != "xno"; then
 
+    if test "x$with_numa" != "xyes" -a "x$with_numa" != "x"; then
+        NUMA_LIBS="-L$with_numa/lib -lnuma"
+        NUMA_INCS="-I$with_numa/include"
+    else
+        NUMA_LIBS="-lnuma"
+        NUMA_INCS=""
+    fi
+
+    #  Test for header file.
+    old_CPPFLAGS="$CPPFLAGS"
+    CPPFLAGS="$CPPFLAGS $NUMA_INCS"
+    AC_CHECK_HEADER([numa.h])
+    CPPFLAGS="$old_CPPFLAGS"
+    if test "$ac_cv_header_numa_h" = "yes"; then
+
+        #  If NUMA location is specified check if we have it.
+        if test "x$with_numa" != "xyes" -a "x$with_numa" != "x"; then
+            AC_CHECK_LIB([numa],[numa_available],
+                AC_DEFINE([HAVE_LIBNUMA],1,[The NUMA library appears to be present.]),
+                AC_MSG_ERROR(something is wrong with the NUMA library!), $NUMA_LIBS)
+            have_numa="yes"
+        else
+            AC_CHECK_LIB([numa],[numa_available],[have_numa="yes"],[have_numa="no"],$NUMA_LIBS)
+            if test "x$have_numa" != "xno"; then
+                AC_DEFINE([HAVE_LIBNUMA],1,[The NUMA library appears to be present.])
+            fi
+        fi
+    fi
+
+    #  We can live without this.
+    if test "$have_numa" = "no"; then
+       NUMA_LIBS=""
+    fi
+fi
+AC_SUBST([NUMA_LIBS])
 
 # Check for Intel and PowerPC intrinsics header optionally used by vector.h.
-AC_CHECK_HEADERS([immintrin.h])
-AC_CHECK_HEADERS([altivec.h])
+AC_CHECK_HEADERS([immintrin.h], [], [],
+[#ifdef HAVE_IMMINTRIN_H
+# include <immintrin.h>
+#endif
+])
+AC_CHECK_HEADERS([altivec.h], [], [],
+[#ifdef HAVE_ALTIVEC_H
+# include <altivec.h>
+#endif
+])
 
 # Check for timing functions needed by cycle.h.
 AC_HEADER_TIME
@@ -871,17 +1174,7 @@ AC_LINK_IFELSE([AC_LANG_PROGRAM(
 [AC_DEFINE(HAVE__RTC,1,[Define if you have the UNICOS _rtc() intrinsic.])],[rtc_ok=no])
 AC_MSG_RESULT($rtc_ok)
 
-# Special timers for the ARM v7 and ARM v8 platforms (taken from FFTW-3 to match their cycle.h)
-AC_ARG_ENABLE(armv8-pmccntr-el0, [AC_HELP_STRING([--enable-armv8-pmccntr-el0],[enable the cycle counter on ARMv8 via the PMCCNTR_EL0 register])], have_armv8pmccntrel0=$enableval)
-if test "$have_armv8pmccntrel0"x = "yes"x; then
-	AC_DEFINE(HAVE_ARMV8_PMCCNTR_EL0,1,[Define if you have enabled the PMCCNTR_EL0 cycle counter on ARMv8])
-fi
-
-AC_ARG_ENABLE(armv8-cntvct-el0, [AC_HELP_STRING([--enable-armv8-cntvct-el0],[enable the cycle counter on ARMv8 via the CNTVCT_EL0 register])], have_armv8cntvctel0=$enableval)
-if test "$have_armv8cntvctel0"x = "yes"x; then
-	AC_DEFINE(HAVE_ARMV8_CNTVCT_EL0,1,[Define if you have enabled the CNTVCT_EL0 cycle counter on ARMv8])
-fi
-
+# Special timers for the ARM v7 platforms (taken from FFTW-3 to match their cycle.h)
 AC_ARG_ENABLE(armv7a-cntvct, [AC_HELP_STRING([--enable-armv7a-cntvct],[enable the cycle counter on Armv7a via the CNTVCT register])], have_armv7acntvct=$enableval)
 if test "$have_armv7acntvct"x = "yes"x; then
 	AC_DEFINE(HAVE_ARMV7A_CNTVCT,1,[Define if you have enabled the CNTVCT cycle counter on ARMv7a])
@@ -933,6 +1226,35 @@ if test "$enable_warn" != "no"; then
                           [CFLAGS="$CFLAGS"],[$CFLAGS],[AC_LANG_SOURCE([int main(void){return 0;}])])
 fi
 
+# Second part of the NUMA library checks. We now decide if we need to use
+# -isystem to get around the strict-prototypes problem. Assumes isystem
+# is available when strict-prototypes is.
+if test "$have_numa" != "no"; then
+    if test "x$with_numa" != "xyes" -a "x$with_numa" != "x"; then
+        case "$CFLAGS" in
+            *strict-prototypes*)
+                NUMA_INCS="-isystem$with_numa/include"
+                # This may still fail if CPATH is used, so we check if the
+                # headers are usable.
+                AS_UNSET(ac_cv_header_numa_h)
+                old_CPPFLAGS="$CPPFLAGS"
+                CPPFLAGS="$CPPFLAGS $NUMA_INCS"
+                numa_failed="no"
+                AC_CHECK_HEADER([numa.h],[numa_failed="no"],
+                                [numa_failed="yes"])
+                if test "$numa_failed" = "yes"; then
+                    AC_MSG_ERROR([Failed to compile the numa.h header file: you may need to set --enable-compiler-warnings to yes or no])
+                fi
+                CPPFLAGS="$old_CPPFLAGS"
+            ;;
+            *)
+                NUMA_INCS="-I$with_numa/include"
+            ;;
+        esac
+   fi
+fi
+AC_SUBST([NUMA_INCS])
+
 # Various package configuration options.
 
 # Master subgrid options
@@ -950,7 +1272,11 @@ AC_ARG_WITH([subgrid],
 # Default values
 with_subgrid_cooling=none
 with_subgrid_chemistry=none
-with_subgrid_hydro=none
+with_subgrid_tracers=none
+with_subgrid_entropy_floor=none
+with_subgrid_stars=none
+with_subgrid_star_formation=none
+with_subgrid_feedback=none
 
 case "$with_subgrid" in
    yes)
@@ -961,12 +1287,20 @@ case "$with_subgrid" in
    GEAR)
 	with_subgrid_cooling=grackle
 	with_subgrid_chemistry=GEAR
-	with_subgrid_hydro=gadget2
+	with_subgrid_tracers=none
+	with_subgrid_entropy_floor=none
+	with_subgrid_stars=GEAR
+	with_subgrid_star_formation=GEAR
+	with_subgrid_feedback=thermal
    ;;
    EAGLE)
 	with_subgrid_cooling=EAGLE
 	with_subgrid_chemistry=EAGLE
-	with_subgrid_hydro=gadget2
+	with_subgrid_tracers=EAGLE
+	with_subgrid_entropy_floor=EAGLE
+	with_subgrid_stars=EAGLE
+	with_subgrid_star_formation=EAGLE
+	with_subgrid_feedback=none
    ;;
    *)
       AC_MSG_ERROR([Unknown subgrid choice: $with_subgrid])
@@ -997,20 +1331,12 @@ esac
 # Hydro scheme.
 AC_ARG_WITH([hydro],
    [AS_HELP_STRING([--with-hydro=<scheme>],
-      [Hydro dynamics to use @<:@gadget2, minimal, pressure-entropy, pressure-energy, default, gizmo-mfv, gizmo-mfm, shadowfax, planetary, debug default: gadget2@:>@]
+      [Hydro dynamics to use @<:@gadget2, minimal, pressure-entropy, pressure-energy, pressure-energy-monaghan, default, gizmo-mfv, gizmo-mfm, shadowfax, planetary, anarchy-pu, debug default: gadget2@:>@]
    )],
    [with_hydro="$withval"],
    [with_hydro="gadget2"]
 )
 
-if test "$with_subgrid" != "none"; then
-   if test "$with_hydro" != "gadget2"; then
-      AC_MSG_ERROR([Cannot provide with-subgrid and with-hydro together])
-   else
-      with_hydro="$with_subgrid_hydro"
-   fi
-fi
-
 case "$with_hydro" in
    gadget2)
       AC_DEFINE([GADGET2_SPH], [1], [Gadget-2 SPH])
@@ -1024,6 +1350,9 @@ case "$with_hydro" in
    pressure-energy)
       AC_DEFINE([HOPKINS_PU_SPH], [1], [Pressure-Energy SPH])
    ;;
+   pressure-energy-monaghan)
+      AC_DEFINE([HOPKINS_PU_SPH_MONAGHAN], [1], [Pressure-Energy SPH with M&M Variable A.V.])
+   ;;
    default)
       AC_DEFINE([DEFAULT_SPH], [1], [Default SPH])
    ;;
@@ -1039,6 +1368,9 @@ case "$with_hydro" in
    planetary)
       AC_DEFINE([PLANETARY_SPH], [1], [Planetary SPH])
    ;;
+   anarchy-pu)
+      AC_DEFINE([ANARCHY_PU_SPH], [1], [ANARCHY (PU) SPH])
+   ;;
 
 
    *)
@@ -1046,6 +1378,25 @@ case "$with_hydro" in
    ;;
 esac
 
+# Check if debugging interactions stars is switched on.
+AC_ARG_ENABLE([debug-interactions-stars],
+   [AS_HELP_STRING([--enable-debug-interactions-stars],
+     [Activate interaction debugging for stars, logging a maximum of @<:@N@:>@ neighbours. Defaults to 256 if no value set.]
+   )],
+   [enable_debug_interactions_stars="$enableval"],
+   [enable_debug_interactions_stars="no"]
+)
+if test "$enable_debug_interactions_stars" != "no"; then
+    AC_DEFINE([DEBUG_INTERACTIONS_STARS],1,[Enable interaction debugging for stars])
+    if test "$enable_debug_interactions_stars" == "yes"; then
+      AC_DEFINE([MAX_NUM_OF_NEIGHBOURS_STARS],256,[The maximum number of particle neighbours to be logged for stars])
+      [enable_debug_interactions_stars="yes (Logging up to 256 neighbours)"]
+    else
+      AC_DEFINE_UNQUOTED([MAX_NUM_OF_NEIGHBOURS_STARS], [$enableval] ,[The maximum number of particle neighbours to be logged for stars])
+      [enable_debug_interactions_stars="yes (Logging up to $enableval neighbours)"]
+    fi
+fi
+
 # Check if debugging interactions is switched on.
 AC_ARG_ENABLE([debug-interactions],
    [AS_HELP_STRING([--enable-debug-interactions],
@@ -1069,6 +1420,7 @@ if test "$enable_debug_interactions" != "no"; then
   fi
 fi
 
+
 # SPH Kernel function
 AC_ARG_WITH([kernel],
    [AS_HELP_STRING([--with-kernel=<kernel>],
@@ -1198,43 +1550,6 @@ case "$with_riemann" in
       AC_MSG_ERROR([Unknown Riemann solver: $with_riemann])
    ;;
 esac
-
-# Check for grackle.
-have_grackle="no"
-AC_ARG_WITH([grackle],
-    [AS_HELP_STRING([--with-grackle=PATH],
-       [root directory where grackle is installed @<:@yes/no@:>@]
-    )],
-    [with_grackle="$withval"],
-    [with_grackle="no"]
-)
-if test "x$with_grackle" != "xno"; then
-   AC_PROG_FC
-   AC_FC_LIBRARY_LDFLAGS
-   if test "x$with_grackle" != "xyes" -a "x$with_grackle" != "x"; then
-      GRACKLE_LIBS="-L$with_grackle/lib -lgrackle"
-      GRACKLE_INCS="-I$with_grackle/include"
-   else
-      GRACKLE_LIBS="-lgrackle"
-      GRACKLE_INCS=""
-   fi
-
-   have_grackle="yes"
-
-   AC_CHECK_LIB(
-      [grackle],
-      [initialize_chemistry_data],
-      [AC_DEFINE([HAVE_GRACKLE],1,[The GRACKLE library appears to be present.])
-        AC_DEFINE([CONFIG_BFLOAT_8],1,[Use doubles in grackle])
-      ],
-      [AC_MSG_ERROR(Cannot find grackle library!)],
-      [$GRACKLE_LIBS $GRACKLE_INCS $FCLIBS]
-   )
-fi
-AC_SUBST([GRACKLE_LIBS])
-AC_SUBST([GRACKLE_INCS])
-AM_CONDITIONAL([HAVEGRACKLE],[test -n "$GRACKLE_LIBS"])
-
 #  Cooling function
 AC_ARG_WITH([cooling],
    [AS_HELP_STRING([--with-cooling=<function>],
@@ -1262,6 +1577,9 @@ case "$with_cooling" in
    const-lambda)
       AC_DEFINE([COOLING_CONST_LAMBDA], [1], [Const Lambda cooling function])
    ;;
+   compton)
+      AC_DEFINE([COOLING_COMPTON], [1], [Compton cooling off the CMB])
+   ;;
    grackle)
       AC_DEFINE([COOLING_GRACKLE], [1], [Cooling via the grackle library])
       AC_DEFINE([COOLING_GRACKLE_MODE], [0], [Grackle chemistry network, mode 0])
@@ -1318,10 +1636,101 @@ case "$with_chemistry" in
    ;;
 esac
 
+#  Particle tracers
+AC_ARG_WITH([tracers],
+   [AS_HELP_STRING([--with-tracers=<function>],
+      [tracers function @<:@none, EAGLE default: none@:>@]
+   )],
+   [with_tracers="$withval"],
+   [with_tracers="none"]
+)
+
+if test "$with_subgrid" != "none"; then
+   if test "$with_tracers" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-tracers together])
+   else
+      with_tracers="$with_subgrid_tracers"
+   fi
+fi
+
+case "$with_tracers" in
+   none)
+      AC_DEFINE([TRACERS_NONE], [1], [No tracers function])
+   ;;
+   EAGLE)
+      AC_DEFINE([TRACERS_EAGLE], [1], [Tracers taken from the EAGLE model])
+   ;;
+   *)
+      AC_MSG_ERROR([Unknown tracers choice: $with_tracers])
+   ;;
+esac
+
+# Stellar model.
+AC_ARG_WITH([stars],
+   [AS_HELP_STRING([--with-stars=<model>],
+      [Stellar model to use @<:@none, EAGLE, GEAR, debug default: none@:>@]
+   )],
+   [with_stars="$withval"],
+   [with_stars="none"]
+)
+
+if test "$with_subgrid" != "none"; then
+   if test "$with_stars" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-stars together])
+   else
+      with_stars="$with_subgrid_stars"
+   fi
+fi
+
+case "$with_stars" in
+   EAGLE)
+      AC_DEFINE([STARS_EAGLE], [1], [EAGLE stellar model])
+   ;;
+   GEAR)
+      AC_DEFINE([STARS_GEAR], [1], [GEAR stellar model])
+   ;;
+   none)
+      AC_DEFINE([STARS_NONE], [1], [None stellar model])
+   ;;
+
+   *)
+      AC_MSG_ERROR([Unknown stellar model: $with_stars])
+   ;;
+esac
+
+# Feedback model
+AC_ARG_WITH([feedback],
+   [AS_HELP_STRING([--with-feedback=<model>],
+      [Feedback model to use @<:@none, thermal, debug default: none@:>@]
+   )],
+   [with_feedback="$withval"],
+   [with_feedback="none"]
+)
+
+if test "$with_subgrid" != "none"; then
+   if test "$with_feedback" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-feedback together])
+   else
+      with_feedback="$with_subgrid_feedback"
+   fi
+fi
+
+case "$with_feedback" in
+   thermal)
+      AC_DEFINE([FEEDBACK_THERMAL], [1], [Thermal Blastwave])
+   ;;
+   none)
+   ;;
+
+   *)
+      AC_MSG_ERROR([Unknown feedback model: $with_feedback])
+   ;;
+esac
+
 #  External potential
 AC_ARG_WITH([ext-potential],
    [AS_HELP_STRING([--with-ext-potential=<pot>],
-      [external potential @<:@none, point-mass, point-mass-ring, point-mass-softened, isothermal, softened-isothermal, disc-patch, sine-wave, default: none@:>@]
+      [external potential @<:@none, point-mass, point-mass-ring, point-mass-softened, isothermal, softened-isothermal, nfw, hernquist, disc-patch, sine-wave, default: none@:>@]
    )],
    [with_potential="$withval"],
    [with_potential="none"]
@@ -1336,6 +1745,12 @@ case "$with_potential" in
    isothermal)
       AC_DEFINE([EXTERNAL_POTENTIAL_ISOTHERMAL], [1], [Isothermal external potential])
    ;;
+   hernquist)
+      AC_DEFINE([EXTERNAL_POTENTIAL_HERNQUIST], [1], [Hernquist external potential])
+   ;;
+   nfw)
+      AC_DEFINE([EXTERNAL_POTENTIAL_NFW], [1], [Navarro-Frenk-White external potential])
+   ;;
    disc-patch)
       AC_DEFINE([EXTERNAL_POTENTIAL_DISC_PATCH], [1], [Disc-patch external potential])
    ;;
@@ -1353,6 +1768,65 @@ case "$with_potential" in
    ;;
 esac
 
+#  Entropy floor
+AC_ARG_WITH([entropy-floor], 
+    [AS_HELP_STRING([--with-entropy-floor=<floor>],
+       [entropy floor @<:@none, EAGLE, default: none@:>@] 
+    )],
+    [with_entropy_floor="$withval"],
+    [with_entropy_floor="none"]
+)
+if test "$with_subgrid" != "none"; then
+   if test "$with_entropy_floor" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-entropy-floor together])
+   else
+      with_entropy_floor="$with_subgrid_entropy_floor"
+   fi
+fi
+
+case "$with_entropy_floor" in
+   none)
+      AC_DEFINE([ENTROPY_FLOOR_NONE], [1], [No entropy floor])
+   ;;
+   EAGLE)
+      AC_DEFINE([ENTROPY_FLOOR_EAGLE], [1], [EAGLE entropy floor])
+   ;;
+   *)
+      AC_MSG_ERROR([Unknown entropy floor model])
+   ;;
+esac 
+
+#  Star formation
+AC_ARG_WITH([star-formation], 
+    [AS_HELP_STRING([--with-star-formation=<sfm>],
+       [star formation @<:@none, EAGLE, GEAR, default: none@:>@] 
+    )],
+    [with_star_formation="$withval"],
+    [with_star_formation="none"]
+)
+if test "$with_subgrid" != "none"; then
+   if test "$with_star_formation" != "none"; then
+      AC_MSG_ERROR([Cannot provide with-subgrid and with-star-formation together])
+   else
+      with_star_formation="$with_subgrid_star_formation"
+   fi
+fi
+
+case "$with_star_formation" in
+   none)
+      AC_DEFINE([STAR_FORMATION_NONE], [1], [No star formation])
+   ;;
+   EAGLE)
+      AC_DEFINE([STAR_FORMATION_EAGLE], [1], [EAGLE star formation model (Schaye and Dalla Vecchia (2008))])
+   ;;
+   GEAR)
+      AC_DEFINE([STAR_FORMATION_GEAR], [1], [GEAR star formation model (Revaz and Jablonka (2018))])
+   ;;
+   *)
+      AC_MSG_ERROR([Unknown star formation model])
+   ;;
+esac 
+
 #  Gravity multipole order
 AC_ARG_WITH([multipole-order],
    [AS_HELP_STRING([--with-multipole-order=<order>],
@@ -1371,12 +1845,18 @@ AC_SUBST([GIT_CMD])
 DX_INIT_DOXYGEN(libswift,doc/Doxyfile,doc/)
 AM_CONDITIONAL([HAVE_DOXYGEN], [test "$ac_cv_path_ac_pt_DX_DOXYGEN" != ""])
 
+# Check if using EAGLE cooling
+AM_CONDITIONAL([HAVEEAGLECOOLING], [test "$with_cooling" = "EAGLE"])
+
 # Handle .in files.
-AC_CONFIG_FILES([Makefile src/Makefile examples/Makefile doc/Makefile doc/Doxyfile tests/Makefile])
+AC_CONFIG_FILES([Makefile src/Makefile examples/Makefile examples/Cooling/CoolingRates/Makefile doc/Makefile doc/Doxyfile tests/Makefile])
+AC_CONFIG_FILES([argparse/Makefile tools/Makefile])
 AC_CONFIG_FILES([tests/testReading.sh], [chmod +x tests/testReading.sh])
 AC_CONFIG_FILES([tests/testActivePair.sh], [chmod +x tests/testActivePair.sh])
 AC_CONFIG_FILES([tests/test27cells.sh], [chmod +x tests/test27cells.sh])
 AC_CONFIG_FILES([tests/test27cellsPerturbed.sh], [chmod +x tests/test27cellsPerturbed.sh])
+AC_CONFIG_FILES([tests/test27cellsStars.sh], [chmod +x tests/test27cellsStars.sh])
+AC_CONFIG_FILES([tests/test27cellsStarsPerturbed.sh], [chmod +x tests/test27cellsStarsPerturbed.sh])
 AC_CONFIG_FILES([tests/test125cells.sh], [chmod +x tests/test125cells.sh])
 AC_CONFIG_FILES([tests/test125cellsPerturbed.sh], [chmod +x tests/test125cellsPerturbed.sh])
 AC_CONFIG_FILES([tests/testPeriodicBC.sh], [chmod +x tests/testPeriodicBC.sh])
@@ -1389,8 +1869,9 @@ AC_CONFIG_FILES([tests/testFormat.sh], [chmod +x tests/testFormat.sh])
 # Save the compilation options
 AC_DEFINE_UNQUOTED([SWIFT_CONFIG_FLAGS],["$swift_config_flags"],[Flags passed to configure])
 
-# Make sure the latest git revision string gets included
-touch src/version.c
+# Make sure the latest git revision string gets included, when we are
+# working in a checked out repository.
+test -d ${srcdir}/.git && touch ${srcdir}/src/version.c
 
 #  Need to define this, instead of using fifth argument of AC_INIT, until
 #  2.64. Defer until now as this redefines PACKAGE_URL, which can emit a
@@ -1413,7 +1894,7 @@ AC_MSG_RESULT([
    MPI enabled          : $enable_mpi
    HDF5 enabled         : $with_hdf5
     - parallel          : $have_parallel_hdf5
-   Metis enabled        : $have_metis
+   METIS/ParMETIS       : $have_metis / $have_parmetis
    FFTW3 enabled        : $have_fftw
    GSL enabled          : $have_gsl
    libNUMA enabled      : $have_numa
@@ -1422,6 +1903,7 @@ AC_MSG_RESULT([
    CPU profiler         : $have_profiler
    Pthread barriers     : $have_pthread_barrier
    VELOCIraptor enabled : $have_velociraptor
+   Particle Logger      : $with_logger
 
    Hydro scheme       : $with_hydro
    Dimensionality     : $with_dimension
@@ -1436,16 +1918,22 @@ AC_MSG_RESULT([
    Make gravity glass  : $gravity_glass_making
    External potential  : $with_potential
 
-   Cooling function   : $with_cooling
-   Chemistry          : $with_chemistry
-
-   Individual timers     : $enable_timers
-   Task debugging        : $enable_task_debugging
-   Threadpool debugging  : $enable_threadpool_debugging
-   Debugging checks      : $enable_debugging_checks
-   Interaction debugging : $enable_debug_interactions
-   Naive interactions    : $enable_naive_interactions
-   Gravity checks        : $gravity_force_checks
-   Custom icbrtf         : $enable_custom_icbrtf
+   Entropy floor        : $with_entropy_floor
+   Cooling function     : $with_cooling
+   Chemistry            : $with_chemistry
+   Tracers              : $with_tracers
+   Stellar model        : $with_stars
+   Star formation model : $with_star_formation
+   Feedback model       : $with_feedback
+
+   Individual timers           : $enable_timers
+   Task debugging              : $enable_task_debugging
+   Threadpool debugging        : $enable_threadpool_debugging
+   Debugging checks            : $enable_debugging_checks
+   Interaction debugging       : $enable_debug_interactions
+   Stars interaction debugging : $enable_debug_interactions_stars
+   Naive interactions          : $enable_naive_interactions
+   Gravity checks              : $gravity_force_checks
+   Custom icbrtf               : $enable_custom_icbrtf
 
  ------------------------])
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index cba52250ccc37f50ed130c70d8a5039d8c786474..db841f347f681c42c1b305c2d130ee0b55d639ae 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -761,13 +761,21 @@ WARN_LOGFILE           =
 
 INPUT                  =  @top_srcdir@ @top_srcdir@/src @top_srcdir@/tests @top_srcdir@/examples
 INPUT		       += @top_srcdir@/src/hydro/Minimal
+INPUT		       += @top_srcdir@/src/hydro/Gadget2
 INPUT		       += @top_srcdir@/src/gravity/Default
 INPUT		       += @top_srcdir@/src/stars/Default
 INPUT		       += @top_srcdir@/src/riemann
 INPUT		       += @top_srcdir@/src/potential/point_mass
 INPUT		       += @top_srcdir@/src/equation_of_state/ideal_gas
+INPUT		       += @top_srcdir@/src/cooling/const_du
+INPUT		       += @top_srcdir@/src/cooling/const_lambda
+INPUT		       += @top_srcdir@/src/cooling/Compton
 INPUT		       += @top_srcdir@/src/cooling/EAGLE
 INPUT		       += @top_srcdir@/src/chemistry/EAGLE
+INPUT		       += @top_srcdir@/src/entropy_floor/EAGLE
+INPUT		       += @top_srcdir@/src/star_formation/EAGLE
+INPUT		       += @top_srcdir@/src/tracers/EAGLE
+INPUT		       += @top_srcdir@/src/stars/EAGLE
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -1971,7 +1979,7 @@ SEARCH_INCLUDES        = YES
 # preprocessor.
 # This tag requires that the tag SEARCH_INCLUDES is set to YES.
 
-INCLUDE_PATH           =
+INCLUDE_PATH           = @top_srcdir@/src
 
 # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
 # patterns (like *.h and *.hpp) to filter out the header-files in the
diff --git a/doc/RTD/source/CommandLineOptions/index.rst b/doc/RTD/source/CommandLineOptions/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..88493ddb10ff2a4978e5e4b31a55efc87ba45d3b
--- /dev/null
+++ b/doc/RTD/source/CommandLineOptions/index.rst
@@ -0,0 +1,66 @@
+.. Command line options
+   Matthieu Schaller, 21st October 2018
+
+.. _cmdline-options:
+
+Command line options
+====================
+
+SWIFT requires a number of runtime options to run and get any sensible output.
+For instance, just running the ``swift`` binary will not use any SPH or gravity;
+the particles will just sit still!
+
+Below is a list of the command line options and when they should be used. The same list
+can be found by typing ``./swift -h``::
+
+    -h, --help                        show this help message and exit
+
+  Simulation options:
+
+    -b, --feedback                    Run with stars feedback.
+    -c, --cosmology                   Run with cosmological time integration.
+    --temperature                     Run with temperature calculation. 
+    -C, --cooling                     Run with cooling (also switches on --with-temperature).
+    -D, --drift-all                   Always drift all particles even the ones
+                                      far from active particles. This emulates
+                                      Gadget-[23] and GIZMO's default behaviours.
+    -F, --star-formation	      Run with star formation.
+    -g, --external-gravity            Run with an external gravitational potential.
+    -G, --self-gravity                Run with self-gravity.
+    -M, --multipole-reconstruction    Reconstruct the multipoles every time-step.
+    -s, --hydro                       Run with hydrodynamics.
+    -S, --stars                       Run with stars.
+    -x, --velociraptor                Run with structure finding.
+    --limiter                         Run with time-step limiter.
+
+  Control options:
+
+    -a, --pin                         Pin runners using processor affinity.
+    -d, --dry-run                     Dry run. Read the parameter file, allocates
+                                      memory but does not read the particles
+                                      from ICs. Exits before the start of time
+                                      integration. Checks the validity of
+                                      parameters and IC files as well as memory
+                                      limits.
+    -e, --fpe                         Enable floating-point exceptions (debugging
+                                      mode).
+    -f, --cpu-frequency=<str>         Overwrite the CPU frequency (Hz) to be
+                                      used for time measurements.
+    -n, --steps=<int>                 Execute a fixed number of time steps.
+                                      When unset use the time_end parameter
+                                      to stop.
+    -o, --output-params=<str>         Generate a default output parameter
+                                      file.
+    -P, --param=<str>                 Set parameter value, overiding the value
+                                      read from the parameter file. Can be used
+                                      more than once {sec:par:value}.
+    -r, --restart                     Continue using restart files.
+    -t, --threads=<int>               The number of threads to use on each MPI
+                                      rank. Defaults to 1 if not specified.
+    -T, --timers=<int>                Print timers every time-step.
+    -v, --verbose=<int>               Run in verbose mode, in MPI mode 2 outputs
+                                      from all ranks.
+    -y, --task-dumps=<int>            Time-step frequency at which task analysis
+                                      files and/or tasks are dumped.
+    -Y, --threadpool-dumps=<int>      Time-step frequency at which threadpool
+                                      tasks are dumped.
diff --git a/doc/RTD/source/Cooling/index.rst b/doc/RTD/source/Cooling/index.rst
deleted file mode 100644
index 46a01b2a054629b7fc13f0ea190c2a5a0fdd6d9c..0000000000000000000000000000000000000000
--- a/doc/RTD/source/Cooling/index.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-.. Equation of State
-   Loic Hausammann, 7th April 2018
-
-.. _cooling:
-
-Cooling
-=======
-
-Currently, we have 5 different cooling (EAGLE, Grackle, const-lambda, const-du
-and none).  Three of them are easily solved analytically (const-lambda,
-const-du and none) while the two last requires complex chemical networks.
-
-
-Equations
----------
-
-The first table compares the different analytical cooling while the next ones
-are specific to a given cooling.  The quantities are the internal energy (\\( u
-\\)), the density \\( rho \\), the element mass fraction (\\( X_i \\)), the
-cooling function (\\(\\Lambda\\), the proton mass (\\( m_H \\)) and the time
-step condition (\\( t\_\\text{step}\\)).  If not specified otherwise, all
-cooling contains a temperature floor avoiding negative temperature.
-
-.. csv-table:: Analytical Cooling
-   :header: "Variable", "Const-Lambda", "Const-du", "None"
-
-   "\\( \\frac{ \\mathrm{d}u }{ \\mathrm{d}t } \\)", "\\( -\\Lambda \\frac{\\rho^2 X_H^2}{\\rho m_H^2} \\)", "const", "0"
-   "\\( \\Delta t\_\\text{max} \\)", "\\( t\_\\text{step} \\frac{u}{\\left|\\frac{ \\mathrm{d}u }{ \\mathrm{d}t }\\right|} \\)", "\\( t\_\\text{step} \\frac{u}{\\ \\left| \\frac{ \\mathrm{d}u }{ \\mathrm{d}t }\\right|} \\)", "None"
-
-
-Grackle
-~~~~~~~
-   
-Grackle is a chemistry and cooling library presented in B. Smith et al. 2016
-(do not forget to cite if used).  Four different modes are available:
-equilibrium, 6 species network (H, H\\( ^+ \\), e\\( ^- \\), He, He\\( ^+ \\)
-and He\\( ^{++} \\)), 9 species network (adds H\\(^-\\), H\\(_2\\) and
-H\\(_2^+\\)) and 12 species (adds D, D\\(^+\\) and HD).  Following the same
-order, the swift cooling options are ``grackle``, ``grackle1``, ``grackle2``
-and ``grackle3`` (the numbers correspond to the value of
-``primordial_chemistry`` in Grackle).  It also includes some self-shielding
-methods and UV background.  In order to use the Grackle cooling, you will need
-to provide an HDF5 table computed by Cloudy.
-
-When starting a simulation without providing the different fractions, the code
-supposes an equilibrium and computes the fractions automatically.
-
-Eagle
-~~~~~
-
-TODO
-
-How to Implement a New Cooling
-------------------------------
-
-The developper should provide at least one function for:
- * writing the cooling name in HDF5
- * cooling a particle
- * the maximal time step possible
- * initializing a particle
- * computing the total energy radiated by a particle
- * initializing the cooling parameters
- * printing the cooling type
-
-For implementation details, see ``src/cooling/none/cooling.h``
-
-See :ref:`new_option` for the full list of changes required.
diff --git a/doc/RTD/source/EquationOfState/index.rst b/doc/RTD/source/EquationOfState/index.rst
index 3558041e9513b967a2530165acec5e5f4f11a364..11e6069130a84ba54dec01e9892464574d9c2c6b 100644
--- a/doc/RTD/source/EquationOfState/index.rst
+++ b/doc/RTD/source/EquationOfState/index.rst
@@ -1,19 +1,21 @@
-.. Equation of State
+.. Equations of State
    Loic Hausammann, 6th April 2018
+   Jacob Kegerreis, 3rd February 2019
 
 .. _equation_of_state:
 
-Equation of State
-=================
+Equations of State
+==================
 
-Currently (if the documentation was well updated), we have two different
-equation of states implemented: ideal gas and isothermal.  They describe the
-relations between our main thermodynamical variables: the internal energy
-(\\(u\\)), the density (\\(\\rho\\)), the entropy (\\(A\\)) and the pressure
-(\\(P\\)).
+Currently (if the documentation was well updated), we have two different gas
+equations of state (EoS) implemented: ideal and isothermal; as well as a variety  
+of EoS for "planetary" materials. 
+The EoS describe the relations between our main thermodynamical variables: 
+the internal energy (\\(u\\)), the density (\\(\\rho\\)), the entropy (\\(A\\)) 
+and the pressure (\\(P\\)).
 
-Equations
----------
+Gas EoS
+-------
 
 In the following section, the variables not yet defined are: \\(\\gamma\\) for
 the adiabatic index and \\( c_s \\) for the speed of sound.
@@ -37,12 +39,55 @@ the adiabatic index and \\( c_s \\) for the speed of sound.
    "\\( c_s\\)", "", "\\(\\sqrt{ u \\gamma \\left( \\gamma - 1 \\right) } \\)", ""
 
 
+
+Planetary EoS
+-------------
+Configuring SWIFT with the ``--with-equation-of-state=planetary`` and 
+``--with-hydro=planetary`` options enables the use of multiple EoS.
+Every SPH particle then requires and carries the additional ``MaterialID`` flag 
+from the initial conditions file. This flag indicates the particle's material 
+and which EoS it should use. 
+
+So far, we have implemented several Tillotson, SESAME, and Hubbard \& MacFarlane 
+(1980) materials, with more on their way.
+The material's ID is set by a base type ID (multiplied by 100), plus a minor 
+type:
+
++ Tillotson (Melosh, 2007): ``1``
+    + Iron: ``100``
+    + Granite: ``101``
+    + Water: ``102``
++ Hubbard \& MacFarlane (1980): ``2``
+    + Hydrogen-helium atmosphere: ``200``
+    + Ice H2O-CH4-NH3 mix: ``201``
+    + Rock SiO2-MgO-FeS-FeO mix: ``202``
++ SESAME (and similar): ``3``
+    + Iron (2140): ``300``
+    + Basalt (7530): ``301``
+    + Water (7154): ``302``
+    + Senft \& Stewart (2008) water (in a SESAME-style table): ``303``
+
+Unlike the EoS for an ideal or isothermal gas, these more complicated materials 
+do not always include transformations between the internal energy, 
+temperature, and entropy. At the moment, we have only implemented 
+\\(P(\\rho, u)\\) and \\(c_s(\\rho, u)\\). 
+This is sufficient for the simple :ref:`planetary_sph` hydrodynamics scheme, 
+but makes these materials currently incompatible with other entropy-based 
+schemes.
+
+The Tillotson sound speed was derived using 
+\\(c_s^2 = \\left. \\dfrac{\\partial P}{\\partial \\rho} \\right|_S \\)
+as described in Kegerreis et al. (2019).
+The table files for the HM80 and SESAME-style EoS can be downloaded using 
+the ``examples/EoSTables/get_eos_tables.sh`` script.
+
+
 How to Implement a New Equation of State
 ----------------------------------------
 
 See :ref:`new_option` for a full list of required changes.
 
-You will need to provide a ``equation_of_state.h`` file containing: the
+You will need to provide an ``equation_of_state.h`` file containing: the
 definition of ``eos_parameters``, IO functions and transformations between the
 different variables: \\(u(\\rho, A)\\), \\(u(\\rho, P)\\), \\(P(\\rho,A)\\),
 \\(P(\\rho, u)\\), \\(A(\\rho, P)\\), \\(A(\\rho, u)\\), \\(c_s(\\rho, A)\\),
diff --git a/doc/RTD/source/ExternalPotentials/index.rst b/doc/RTD/source/ExternalPotentials/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ca33eb8189eea216863feb02579344aa22916696
--- /dev/null
+++ b/doc/RTD/source/ExternalPotentials/index.rst
@@ -0,0 +1,80 @@
+.. External potentials in SWIFT
+   Folkert Nobels, 25th October 2018
+   
+External Potentials 
+===================
+
+SWIFT can be run with an external potential. On this page we will summarize the
+current potentials which can be run with SWIFT and how to implement your own 
+potential in SWIFT.
+
+Implemented External Potentials
+-------------------------------
+
+Currently there are several potentials implemented in SWIFT. On this page we 
+give a short overview of the potentials that are implemented in the code:
+
+1. No potential (none)
+2. Point mass potential (point-mass): classical point mass, can be placed at
+   a position with a mass.
+3. Plummer potential (point-mass-softened): in the code a softened point mass 
+   corresponds to a Plummer potential, can be placed at a position with a mass.
+4. Isothermal potential (isothermal): An isothermal potential which corresponds 
+   to a density profile which is :math:`\propto r^{-2}` and a potential which is 
+   logarithmic. This potential has as free parameters the rotation velocity 
+   and the position.
+5. Hernquist potential (hernquist): A potential that is given by the Hernquist 
+   potential: 
+   
+   :math:`\Phi(r) = - \frac{GM}{r+a}.`
+
+   The free parameters of Hernquist potential are mass, scale length,
+   and softening. The potential can be set at any position in the box.
+6. NFW potential (nfw): The most widely used potential to describe dark matter halos; the
+   potential is given by:
+
+   :math:`\Phi(r) = - \frac{4\pi G \rho_0 R_s^3}{r} \ln \left( 1+ 
+   \frac{r}{R_s} \right).`
+
+   This potential has as free parameters the concentration of the DM halo, the
+   virial mass (:math:`M_{200}`) and the critical density.
+7. Sine wave (sine-wave)
+8. Point mass ring (point-mass-ring)
+9. Disc Patch (disc-patch)
+
+
+How to implement your own potential
+-----------------------------------
+
+The first step in implementing your own potential is making a directory of your
+potential in the ``src/potential`` folder and creating a file in the folder 
+called ``potential.h``.
+
+Configuring the potential 
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To get started you can copy a ``potential.h`` file from an already implemented 
+potential. In this potential the header guards (e.g. ``#IFDEF <>``) need to be 
+changed to the specific potential and the ``struct`` and 
+``potential_init_backend`` need to be  changed such that it uses your potential 
+and reads the correct potential from the parameter file during running the 
+program.
+
+Add the potential to the ``potential.h`` file in the ``src`` directory such that
+the program knows that it is possible to run with this potential.
+
+Furthermore during the configuration of the code it also needs to be clear for 
+the program that the code can be configured to run with the different 
+potentials. This means that the ``configure.ac`` file needs to be changed.
+This can be done by adding another case to the potential selection::
+
+  case "$with_potential" in
+     none)
+        AC_DEFINE([EXTERNAL_POTENTIAL_NONE], [1], [No external potential])
+     ;;
+     newpotential)
+        AC_DEFINE([EXTERNAL_POTENTIAL_NEWPOTENTIAL], [1], [New external potential])
+     ;;
+
+After this change it is possible to configure the code to use your new potential.
+
diff --git a/doc/RTD/source/GettingStarted/compiling_code.rst b/doc/RTD/source/GettingStarted/compiling_code.rst
index c40f06965e15146c41bf210aec3b195032cef0e7..0cfde4d18db62c2e0b41e652c73a6b6ad268440e 100644
--- a/doc/RTD/source/GettingStarted/compiling_code.rst
+++ b/doc/RTD/source/GettingStarted/compiling_code.rst
@@ -24,6 +24,15 @@ MPI
 A recent implementation of MPI, such as Open MPI (v2.x or higher), is required,
 or any library that implements at least the MPI 3 standard.
 
+Running SWIFT on OmniPath architectures with Open MPI
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running SWIFT on an OmniPath system we suggest that Open MPI v3.1.3 or higher
+is used. A bug in the ``psm2`` library causes communications to be lost. It is
+possible to run SWIFT with older versions (tested with v2.1.x) of Open MPI so
+long as ``psm`` is used instead of ``psm2``, i.e. that you invoke ``mpirun``
+with ``--mca btl vader,self -mca mtl psm``.
+
 Libtool
 ~~~~~~~
 The build system depends on libtool.
@@ -32,13 +41,13 @@ FFTW
 ~~~~
 Version 3.3.x or higher is required for periodic gravity.
 
-METIS
-~~~~~
-METIS is used for domain decomposition and load balancing.
+ParMETIS or METIS
+~~~~~~~~~~~~~~~~~
+One is required for domain decomposition and load balancing.
 
 libNUMA
 ~~~~~~~
-libNUMA is used to pin threads.
+libNUMA is used to pin threads (but see INSTALL.swift).
 
 GSL
 ~~~
diff --git a/doc/RTD/source/GettingStarted/configuration_options.rst b/doc/RTD/source/GettingStarted/configuration_options.rst
index e37384cfd1c29cb1df82cc180a763f4859650b2e..7dca5cddb0012b9a2146640b9373cfbe81c8dbdd 100644
--- a/doc/RTD/source/GettingStarted/configuration_options.rst
+++ b/doc/RTD/source/GettingStarted/configuration_options.rst
@@ -45,6 +45,6 @@ Several cooling implementations (including GRACKLE) are available.
 Many external potentials are available for use with SWIFT. You can choose
 between them at compile time. Some examples include a central potential, a
 softened central potential, and a sinusoidal potential. You will need to
-configure, for example, the mass in your parameterfile at runtime.
+configure, for example, the mass in your parameter file at runtime.
 
 
diff --git a/doc/RTD/source/GettingStarted/index.rst b/doc/RTD/source/GettingStarted/index.rst
index 36de8ea740490c16bc9d6b69d871290e80dc2091..2086bcfb4af0ac1b7bbc24c34caa85fa1ebec498 100644
--- a/doc/RTD/source/GettingStarted/index.rst
+++ b/doc/RTD/source/GettingStarted/index.rst
@@ -20,7 +20,6 @@ and keep on your desk.
    running_example
    runtime_options
    configuration_options
-   parameter_file
    what_about_mpi
    running_on_large_systems
    special_modes
diff --git a/doc/RTD/source/GettingStarted/running_example.rst b/doc/RTD/source/GettingStarted/running_example.rst
index 854e74cf830d58e51cf866d59a93ede6dceb57b6..9dfbdd8c8ec98ea59892a551691aa5f230052e2e 100644
--- a/doc/RTD/source/GettingStarted/running_example.rst
+++ b/doc/RTD/source/GettingStarted/running_example.rst
@@ -14,19 +14,19 @@ as ``wget`` for grabbing the glass).
    cd examples/SodShock_3D
    ./getGlass.sh
    python makeIC.py
-   ../swift -s -t 4 sodShock.yml
+   ../swift --hydro --threads=4 sodShock.yml
    python plotSolution.py 1
 
 
 This will run the 'SodShock' in 3D and produce a nice plot that shows you
-how the density has varied. Try running with GIZMO (this will take
+how the density has varied. Try running with GIZMO-MFV (this will take
 _significantly_ longer than with SPH) to see the difference. For that, you
 will need to reconfigure with the following options:
 
 .. code-block:: bash
    
    ./configure \
-   --with-hydro=gizmo \
+   --with-hydro=gizmo-mfv \
    --with-riemann-solver=hllc
 
 
diff --git a/doc/RTD/source/GettingStarted/running_on_large_systems.rst b/doc/RTD/source/GettingStarted/running_on_large_systems.rst
index 55eb812cef21474045931490591b3978841a4085..42beedf790dc3d87895cd9bd6db13f25942b0a16 100644
--- a/doc/RTD/source/GettingStarted/running_on_large_systems.rst
+++ b/doc/RTD/source/GettingStarted/running_on_large_systems.rst
@@ -26,9 +26,8 @@ system (i.e. over MPI on several nodes). Here are some recommendations:
 + Run with threads pinned. You can do this by passing the ``-a`` flag to the
   SWIFT binary. This ensures that processes stay on the same core that spawned
   them, ensuring that cache is accessed more efficiently.
-+ Ensure that you compile with METIS. More information is available in an
-  upcoming paper, but using METIS allows for work to be distributed in a
-  more efficient way between your nodes.
++ Ensure that you compile with ParMETIS or METIS. These are required if you
+  want to load balance between MPI ranks.
 
 Your batch script should look something like the following (to run on 16 nodes
 each with 2x16 core processors for a total of 512 cores):
@@ -38,5 +37,5 @@ each with 2x16 core processors for a total of 512 cores):
    #SBATCH -N 16  # Number of nodes to run on
    #SBATCH --tasks-per-node=2  # This system has 2 chips per node
    
-   mpirun -np 32 swift_mpi -t 16 -a parameter.yml
+   mpirun -np 32 swift_mpi --threads=16 --pin parameter.yml
 
diff --git a/doc/RTD/source/GettingStarted/runtime_options.rst b/doc/RTD/source/GettingStarted/runtime_options.rst
index b2ca10640d8830b9b5ecb8e117bf047af738889c..fdd2c1233cc09cc3a46c8eb2e38efb10729a2950 100644
--- a/doc/RTD/source/GettingStarted/runtime_options.rst
+++ b/doc/RTD/source/GettingStarted/runtime_options.rst
@@ -8,34 +8,5 @@ SWIFT requires a number of runtime options to run and get any sensible output.
 For instance, just running the ``swift`` binary will not use any SPH or gravity;
 the particles will just sit still!
 
-Below is a list of the runtime options and when they should be used. The same list
-can be found by typing ``./swift -h``.
+A list of available command line options can be found on the :ref:`cmdline-options` page.
 
-+ ``-a``: Pin runners using processor affinity.
-+ ``-c``: Run with cosmological time integration.
-+ ``-C``: Run with cooling.
-+ ``-d``: Dry run. Read the parameter file, allocate memory but does not read
-  the particles from ICs and exit before the start of time integration. Allows
-  user to check validity of parameter and IC files as well as memory limits.
-+ ``-D``: Always drift all particles even the ones far from active particles.
-  This emulates Gadget-[23] and GIZMO's default behaviours.
-+ ``-e``: Enable floating-point exceptions (debugging mode).
-+ ``-f``: {int} Overwrite the CPU frequency (Hz) to be used for time measurements.
-+ ``-g``: Run with an external gravitational potential.
-+ ``-G``: Run with self-gravity.
-+ ``-M``: Reconstruct the multipoles every time-step.
-+ ``-n``: {int} Execute a fixed number of time steps. When unset use the
-  time_end parameter to stop.
-+ ``-o``: {str} Generate a default output parameter file.
-+ ``-P``: {sec:par:val} Set parameter value and overwrites values read from the
-  parameters file. Can be used more than once.
-+ ``-s``: Run with hydrodynamics.
-+ ``-S``: Run with stars.
-+ ``-t``: {int} The number of threads to use on each MPI rank. Defaults to 1 if
-  not specified.
-+ ``-T``: Print timers every time-step.
-+ ``-v``: [12] Increase the level of verbosity: 1, MPI-rank 0 writes, 2, All
-  MPI-ranks write.
-+ ``-y``: {int} Time-step frequency at which task graphs are dumped.
-+ ``-Y``: {int} Time-step frequency at which threadpool tasks are dumped.
-+ ``-h``: Print a help message and exit.
diff --git a/doc/RTD/source/GettingStarted/what_about_mpi.rst b/doc/RTD/source/GettingStarted/what_about_mpi.rst
index 098fd35d80d71866cb86d2342d5d54710cd73a82..98141049f3e36506d6033259e7f5bb9394daf997 100644
--- a/doc/RTD/source/GettingStarted/what_about_mpi.rst
+++ b/doc/RTD/source/GettingStarted/what_about_mpi.rst
@@ -9,4 +9,4 @@ and the other ``swift_mpi``. Current wisdom is to run ``swift`` if you are only
 using one node (i.e. without any interconnect), and one MPI rank per NUMA
 region using ``swift_mpi`` for anything larger. You will need some GADGET-2
 HDF5 initial conditions to run SWIFT, as well as a compatible yaml
-parameterfile.
+parameter file.
diff --git a/doc/RTD/source/HydroSchemes/adding_your_own.rst b/doc/RTD/source/HydroSchemes/adding_your_own.rst
index 2d7e640f66153a17e19f4e4c456cd37eed19a95a..549a7a42a22e7f755ad342b86b24c28f67118838 100644
--- a/doc/RTD/source/HydroSchemes/adding_your_own.rst
+++ b/doc/RTD/source/HydroSchemes/adding_your_own.rst
@@ -13,7 +13,7 @@ Adding Hydro Schemes
 SWIFT is engineered to enable you to add your own hydrodynamics schemes easily.
 We enable this through the use of header files to encapsulate each scheme.
 
-Note that it's unlikely you will ever have to consider paralellism or 'loops over
+Note that it's unlikely you will ever have to consider parallelism or 'loops over
 neighbours' for SWIFT; all of this is handled by the tasking system. All we ask
 for is the interaction functions that tell us how to a) compute the density
 and b) compute forces.
@@ -69,7 +69,7 @@ will need to 'fill out' the following:
 + ``hydro_compute_timestep(p, xp, hydro_props, cosmo)`` returns the timestep for 
   the hydrodynamics particles.
 + ``hydro_timestep_extra(p, dt)`` does some extra hydro operations once the
-  physical timestel for the particle is known.
+  physical timestep for the particle is known.
 + ``hydro_init_part(p, hydro_space)`` initialises the particle in preparation for
   the density calculation. This essentially sets properties, such as the density,
   to zero.
diff --git a/doc/RTD/source/HydroSchemes/gizmo.rst b/doc/RTD/source/HydroSchemes/gizmo.rst
index 365e1dc41c27f7c92bfb33859bedad2d96f35248..bbfcae04e1abac57b1476e4533bf92e051e6769d 100644
--- a/doc/RTD/source/HydroSchemes/gizmo.rst
+++ b/doc/RTD/source/HydroSchemes/gizmo.rst
@@ -10,7 +10,7 @@ GIZMO-Like Scheme
    :caption: Contents:
 
 
-There is a meshless finite volume (MFV) GIZMO-like scheme implemented in SWIFT
+There is a mesh-less finite volume (MFV) GIZMO-like scheme implemented in SWIFT
 (see Hopkins 2015 for more information). You will need a Riemann solver to run
 this, and configure as follows:
 
@@ -19,7 +19,7 @@ this, and configure as follows:
    ./configure --with-hydro="gizmo-mfv" --with-riemann-solver="hllc"
 
 
-We also have the meshless finite mass (MFM) GIZMO-like scheme. You can select
+We also have the mesh-less finite mass (MFM) GIZMO-like scheme. You can select
 this at compile-time with the following configuration flags:
 
 .. code-block:: bash
diff --git a/doc/RTD/source/HydroSchemes/hopkins_sph.rst b/doc/RTD/source/HydroSchemes/hopkins_sph.rst
index bcc51e0ad96b18956f1c8e54f7bf2bf3b352c138..e4f1479230df96eabaa1fe16994960059858613b 100644
--- a/doc/RTD/source/HydroSchemes/hopkins_sph.rst
+++ b/doc/RTD/source/HydroSchemes/hopkins_sph.rst
@@ -28,3 +28,9 @@ scheme it includes a Monaghan AV scheme and a Balsara switch.
 .. code-block:: bash
    
    ./configure --with-hydro="pressure-energy"
+
+Both of the above schemes use a very simple, fixed artificial viscosity, only
+the ``SPH:viscosity_alpha`` parameter has any effect for this scheme. This will
+change the strength of the artificial viscosity throughout the simulation, and
+has a default of 0.8.
+
diff --git a/doc/RTD/source/HydroSchemes/index.rst b/doc/RTD/source/HydroSchemes/index.rst
index cd6c169245e83440a1258d216991763488586c0c..462bb7378162ff1addab3212a6901412195a3377 100644
--- a/doc/RTD/source/HydroSchemes/index.rst
+++ b/doc/RTD/source/HydroSchemes/index.rst
@@ -15,6 +15,7 @@ schemes available in SWIFT, as well as how to implement your own.
 
    traditional_sph
    minimal_sph
+   planetary
    hopkins_sph
    gizmo
    adding_your_own
diff --git a/doc/RTD/source/HydroSchemes/minimal_sph.rst b/doc/RTD/source/HydroSchemes/minimal_sph.rst
index 1a16a23360aaba8b28920150af0d4f4b05c74c2f..bbcbe026b56381c007f58920f31115f9f9160d05 100644
--- a/doc/RTD/source/HydroSchemes/minimal_sph.rst
+++ b/doc/RTD/source/HydroSchemes/minimal_sph.rst
@@ -10,11 +10,17 @@ Minimal (Density-Energy) SPH
    :caption: Contents:
 
 This scheme is a textbook implementation of Density-Energy SPH, and can be used
-as a pedagogical example. It also implements a Monaghan AV scheme, like the
-GADGET-2 scheme. It uses very similar equations, but differs in implementation
-details; namely it tracks the internal energy \(u\) as the thermodynamic
-variable, rather than entropy \(A\). To use the minimal scheme, use
+as a pedagogical example. It also implements a Monaghan AV scheme with a
+Balsara switch, like the GADGET-2 scheme. It uses very similar equations, but
+differs in implementation details; namely it tracks the internal energy \(u\)
+as the thermodynamic variable, rather than entropy \(A\). To use the minimal
+scheme, use
 
 .. code-block:: bash
 
     ./configure --with-hydro="minimal"
+
+As it uses a very simple, fixed artificial viscosity, only the
+``SPH:viscosity_alpha`` parameter has any effect for this scheme. This will
+change the strength of the artificial viscosity throughout the simulation,
+and has a default of 0.8.
diff --git a/doc/RTD/source/HydroSchemes/planetary.rst b/doc/RTD/source/HydroSchemes/planetary.rst
new file mode 100755
index 0000000000000000000000000000000000000000..20f41758baadba2cddb99e79d3435bb3301065e0
--- /dev/null
+++ b/doc/RTD/source/HydroSchemes/planetary.rst
@@ -0,0 +1,26 @@
+.. Planetary SPH
+    Jacob Kegerreis, 3rd February 2019
+
+.. _planetary_sph:
+
+Planetary (Density-Energy, Multi-Material) SPH
+==============================================
+
+.. toctree::
+   :maxdepth: 2
+   :hidden:
+   :caption: Contents:
+
+This scheme is the same as the Minimal SPH scheme but also allows multiple 
+materials, meaning that different SPH particles can be assigned different 
+:ref:`equation_of_state` (EoS).
+
+To use the planetary scheme and the corresponding planetary EoS, use 
+
+.. code-block:: bash
+
+    ./configure --with-hydro=planetary --with-equation-of-state=planetary
+
+Every SPH particle then requires and carries the additional ``MaterialID`` flag 
+from the initial conditions file. This flag indicates the particle's material 
+and which EoS it should use. 
\ No newline at end of file
diff --git a/doc/RTD/source/HydroSchemes/traditional_sph.rst b/doc/RTD/source/HydroSchemes/traditional_sph.rst
index c69ea5f60644119b8590414ffe00a75246de49a6..455e8bebe516bd9be9f6df889f1ead2088ca94d2 100644
--- a/doc/RTD/source/HydroSchemes/traditional_sph.rst
+++ b/doc/RTD/source/HydroSchemes/traditional_sph.rst
@@ -15,3 +15,8 @@ a Monaghan artificial viscosity scheme and Balsara switch.
 To use this hydro scheme, you need no extra configuration options -- it is the
 default!
 
+As it uses a very simple, fixed artificial viscosity, only the
+``SPH:viscosity_alpha`` parameter has any effect for this scheme. This will
+change the strength of the artificial viscosity throughout the simulation,
+and has a default of 0.8.
+
diff --git a/doc/RTD/source/InitialConditions/index.rst b/doc/RTD/source/InitialConditions/index.rst
index eba438c722fbf4ffd78984aa55d6bfa5efcd71ad..e585c9aa55f269ebbbf9b2d83034b96a688a99f4 100644
--- a/doc/RTD/source/InitialConditions/index.rst
+++ b/doc/RTD/source/InitialConditions/index.rst
@@ -11,17 +11,21 @@ conditions format as the popular `GADGET-2
 its type 3 format. Note that we do not support the GADGET-2 types 1 and 2
 formats.
 
+One crucial difference is that whilst GADGET-2 can have initial conditions split
+over many files SWIFT only supports initial conditions in one single file. **ICs
+split over multiple files cannot be read by SWIFT**. See the
+":ref:`multiple_files_ICs`" section below for possible solutions. In GADGET-2
+having multiple files allows multiple ones to be read in parallel and is the
+only way the code can handle more than 2^31 particles. This limitation is not in
+place in SWIFT. A single file can contain any number of particles (well... up to
+2^64...)  and the file is read in parallel by HDF5 when running on more than one
+compute node.
+
 The original GADGET-2 file format only contains 2 types of particles: gas
-particles and 5 sorts of collisionless particles that allow users to run with 5
+particles and 5 sorts of collision-less particles that allow users to run with 5
 separate particle masses and softenings. In SWIFT, we expand on this by using
 two of these types for stars and black holes.
 
-GADGET-2 can have initial conditions split over many files. This allow multiple
-ones to be read in parallel and is the only way the code can handle more than
-2^31 particles. This limitation is not in place in SWIFT. A single file can
-contain any number of particles (well... up to 2^64...) and the file is read in
-parallel by HDF5 when running on more than one compute node.
-
 As the original documentation for the GADGET-2 initial conditions format is
 quite sparse, we lay out here all of the necessary components. If you are
 generating your initial conditions from python, we recommend you use the h5py
@@ -35,7 +39,7 @@ You can find out more about the HDF5 format on their `webpages
 Structure of the File
 ---------------------
 
-There are several groups that contain 'auxilliary' information, such as
+There are several groups that contain 'auxiliary' information, such as
 ``Header``.  Particle data is placed in separate groups depending of the type of
 the particles. Some types are currently ignored by SWIFT but are kept in the
 file format for compatibility reasons.
@@ -98,7 +102,7 @@ In the ``/Header/`` group, the following attributes are required:
   ``NumPart_Total`` to be >2^31, the use of ``NumPart_Total_HighWord`` is only
   here for compatibility reasons.
 + ``Flag_Entropy_ICs``, a historical value that tells the code if you have
-  included entropy or internal energy values in your intial conditions files.
+  included entropy or internal energy values in your initial conditions files.
   Acceptable values are 0 or 1. We recommend using internal energies over
   entropy in the ICs and hence have this flag set to 0.
 
@@ -113,19 +117,12 @@ GADGET-2 based analysis programs:
   exactly the same as the ``NumPart_Total`` array. As SWIFT only uses ICs
   contained in a single file, this is not necessary for SWIFT-only ICs.
 + ``NumFilesPerSnapshot``, again a historical integer value that tells the code
-  how many files there are per snapshot. You will probably want to set this to 1.
+  how many files there are per snapshot. You will probably want to set
+  this to 1. If this field is present in a SWIFT IC file and has a
+  value different from 1, the code will return an error message.
 + ``Time``, time of the start of the simulation in internal units or expressed
   as a scale-factor for cosmological runs. SWIFT ignores this and reads it from
   the parameter file.
-  
-RuntimePars
-~~~~~~~~~~~
-
-In the ``/RuntimePars/``, the following attributes are required:
-
-+ ``PeriodicBoundaryConditionsOn``, a flag to tell the code whether or not you
-  have periodic boundaries switched on. Again, this is historical; it should be
-  set to 1 (default) if you have the code running in periodic mode, or 0 otherwise.
 
 
 Particle Data
@@ -145,12 +142,12 @@ individual particle type (e.g. ``/PartType0/``) that have the following *dataset
   velocities divided by ``sqrt(a)`` (see below for a fix).
 + ``ParticleIDs``, an array of length N that are unique identifying numbers for
   each particle. Note that these have to be unique to a particle, and cannot be
-  the same even between particle types. The **IDs must be >1**. 0 or negative
+  the same even between particle types. The **IDs must be >= 0**. Negative
   IDs will be rejected by the code.
 + ``Masses``, an array of length N that gives the masses of the particles.
 
 For ``PartType0`` (i.e. particles that interact through hydro-dynamics), you will
-need the following auxilliary items:
+need the following auxiliary items:
 
 + ``SmoothingLength``, the smoothing lengths of the particles. These will be
   tidied up a bit, but it is best if you provide accurate numbers. In
@@ -172,11 +169,13 @@ h-free quantities. Switching this parameter on will also affect the box size
 read from the ``/Header/`` group (see above).
 
 Similarly, GADGET cosmological ICs have traditionally used velocities expressed
-as peculiar velocities divided by ``sqrt(a)``. This can be undone by swicthing
+as peculiar velocities divided by ``sqrt(a)``. This can be undone by switching
 on the parameter ``InitialConditions:cleanup_velocity_factors`` in the
 :ref:`Parameter_File_label`.
 
-     
+
+.. _ICs_units_label:
+
 Optional Components
 -------------------
 
@@ -214,8 +213,6 @@ You should have an HDF5 file with the following structure:
      Flag_Entropy_ICs=0
      NumPart_Total=[0, 1, 0, 0, 4, 5]
      NumPart_Total_HighWord=[0, 0, 0, 0, 0, 0]
-   RuntimePars/
-     PeriodicBoundariesOn=1
    Units/
      Unit current in cgs (U_I)=1.0
      Unit length in cgs (U_L)=1.0
@@ -235,4 +232,27 @@ You should have an HDF5 file with the following structure:
      ParticleIDs=[...]
      Masses=[...]
 
+.. _multiple_files_ICs:
+     
+ICs split over multiple files
+-----------------------------
+
+A basic script ``tools/combine_ics.py`` is provided to merge basic GADGET-2
+initial conditions split into multiple files into one single valid file. This
+script can handle simple HDF5 files (GADGET-2 type 3 ICs) that follow the format
+described above but split over multiple files.
+
+The script can also convert ICs using a ``MassTable`` and create the
+corresponding particle fields. Note that additional fields present in ICs beyond
+the simple GADGET-2 specification will not be merged.
+
+One additional option is to compress the fields in the files using HDF5's gzip
+compression. This is very effective for the fields such as masses or particle
+IDs which are very similar. A checksum filter is also applied in all cases to
+help with data curation.
+
+**We caution that this script is very basic and should only be used with great
+caution.** 
+
+
 
diff --git a/doc/RTD/source/NewOption/index.rst b/doc/RTD/source/NewOption/index.rst
index a7445524017fefd99d76c80a4a1ecc646874bd7a..08f1ff04efa9508145c1f7e04d72d2f40fe22f0d 100644
--- a/doc/RTD/source/NewOption/index.rst
+++ b/doc/RTD/source/NewOption/index.rst
@@ -1,4 +1,4 @@
-.. Equation of State
+.. Adding new schemes
    Loic Hausammann, 7th April 2018
 
 .. _new_option:
@@ -7,8 +7,8 @@ General information for adding new schemes
 ==========================================
 
 The following steps are required for any new options (such as new
-:ref:`hydro`, :ref:`chemistry`, :ref:`cooling`,
-:ref:`equation_of_state`, :ref:`stars` or :ref:`gravity`)
+:ref:`hydro`, chemistry, cooling,
+:ref:`equation_of_state`, stars, or gravity)
    
 In order to add a new scheme, you will need to:
 
diff --git a/doc/RTD/source/ParameterFiles/index.rst b/doc/RTD/source/ParameterFiles/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4cd0ab7bff1396c90c4dfe978b3e109db64bcab5
--- /dev/null
+++ b/doc/RTD/source/ParameterFiles/index.rst
@@ -0,0 +1,17 @@
+.. Parameter Files
+   Josh Borrow 22nd January 2019
+
+.. _Parameter_File_label:
+
+Parameter Files
+===============
+
+This section describes the options that are available in the
+parameter files.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   parameter_description
+   output_selection
diff --git a/doc/RTD/source/GettingStarted/parameter_file.rst b/doc/RTD/source/ParameterFiles/output_selection.rst
similarity index 63%
rename from doc/RTD/source/GettingStarted/parameter_file.rst
rename to doc/RTD/source/ParameterFiles/output_selection.rst
index 550040ed25ec307633d6fade81eced58ed65a254..b84a776c7dcac2136dedd2324cfef43d7a5455ea 100644
--- a/doc/RTD/source/GettingStarted/parameter_file.rst
+++ b/doc/RTD/source/ParameterFiles/output_selection.rst
@@ -1,23 +1,17 @@
 .. Parameter File
-   Loic Hausammann, 1 june 2018
+   Loic Hausammann, 1 June 2018
 
-.. _Parameter_File_label:
-
-Parameter File
-==============
-
-To run SWIFT, you will need to provide a ``yaml`` parameter file.  An example is
-given in ``examples/parameter_file.yml`` which should contain all possible
-parameters.  Each section in this file corresponds to a different option in
-SWIFT and are not always required depending on the configuration options and
-the run time parameters.
+.. _Output_list_label:
 
 Output List
 ~~~~~~~~~~~
 
-In the sections ``Snapshots`` and ``Statistics``, you can specify the options ``output_list_on`` and ``output_list``  which receive an int and a filename.
-The ``output_list_on`` enable or not the output list and ``output_list`` is the filename containing the output times.
-With the file header, you can choose between writing redshifts, scale factors or times.
+In the sections ``Snapshots`` and ``Statistics``, you can specify the
+options ``output_list_on`` and ``output_list`` which receive an int
+and a filename.  The ``output_list_on`` parameter enables or disables the output list
+and ``output_list`` is the filename containing the output times.  With
+the file header, you can choose between writing redshifts, scale
+factors or times.
 
 Example of file containing with times (in internal units)::
 
@@ -42,6 +36,12 @@ Example of file with redshift::
   10
   5
 
+If an output list is specified, the basic values for the first
+snapshot (``time_first``, ``scale_factor_first``) and difference
+(``delta_time``) are ignored.
+  
+.. _Output_selection_label:
+
 Output Selection
 ~~~~~~~~~~~~~~~~
 
@@ -57,13 +57,13 @@ default all fields are written.
 
 This field is mostly used to remove unnecessary output by listing them
 with 0's. A classic use-case for this feature is a DM-only simulation
-(pure n-body) where all particles have the same mass. Outputing the
+(pure n-body) where all particles have the same mass. Outputting the
 mass field in the snapshots results in extra i/o time and unnecessary
 waste of disk space. The corresponding section of the ``yaml``
 parameter file would look like this::
 
   SelectOutput:
-    Masses_DM:   0 
+    Masses_DM:   0
 
 You can generate a ``yaml`` file containing all the possible fields
-available for a given configuration of SWIFT by running ``./swift -o output.yml``. 
+available for a given configuration of SWIFT by running ``./swift --output-params output.yml``.
diff --git a/doc/RTD/source/ParameterFiles/parameter_description.rst b/doc/RTD/source/ParameterFiles/parameter_description.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a4159377631fb45cc8d8f91d58d06f720a5cb785
--- /dev/null
+++ b/doc/RTD/source/ParameterFiles/parameter_description.rst
@@ -0,0 +1,940 @@
+.. Parameter Description
+   Matthieu Schaller, 21st October 2018
+
+.. _Parameters_basics:
+
+File format and basic information
+---------------------------------
+
+The parameter file uses a format similar to the `YAML format
+<https://en.wikipedia.org/wiki/YAML>`_ but reduced to only the
+elements required for the SWIFT parameters. Options are given by a
+name followed by a colon and the value of the parameter:
+
+.. code:: YAML
+
+   ICs:        santa_barbara.hdf5
+   dt_max:     1.5
+   shift:      [2., 4., 5.]
+
+Comments can be inserted anywhere and start with a hash:
+
+.. code:: YAML
+
+   # Description of the physics
+   viscosity_alpha:     2.0
+   dt_max:              1.5     # seconds
+
+A typical SWIFT parameter file is split into multiple sections that
+may or may not be present depending on the different configuration
+options. The sections start with a label and can contain any number of
+parameters:
+
+.. code:: YAML
+
+   Cosmology:    # Planck13
+     Omega_m:        0.307
+     Omega_lambda:   0.693
+     Omega_b:        0.0455
+     h:              0.6777
+     a_begin:        0.0078125     # z = 127
+
+The options can be integer values, floating point numbers, characters
+or strings. If SWIFT expects a number and a string is given, an error
+will be raised. The code can also read an array of values:
+
+.. code:: YAML
+
+   shift:  [2., 4., 5.]
+
+Some options in the parameter file are optional and
+when not provided, SWIFT will run with the default value. However, if
+a compulsory parameter is missing an error will be raised at
+start-up.
+
+Finally, SWIFT outputs two YAML files at the start of a run. The first one
+``used_parameters.yml`` contains all the parameters that were used for this run,
+**including all the optional parameters left unspecified with their default
+values**. This file can be used to start an exact copy of the run. The second
+file, ``unused_parameters.yml`` contains all the values that were not read from
+the parameter file. This can be used to simplify the parameter file or check
+that nothing important was ignored (for instance because the code is not
+configured to use some options).
+
+The rest of this page describes all the SWIFT parameters, split by
+section. A list of all the possible parameters is kept in the file
+``examples/parameter_examples.yml``.
+
+.. _Parameters_units:
+
+Internal Unit System
+--------------------
+
+The ``InternalUnitSystem`` section describes the units used internally by the
+code. This is the system of units in which all the equations are solved. All
+physical constants are converted to this system and if the ICs use a different
+system (see the :ref:`ICs_units_label` section of the documentation)
+the particle quantities will be converted when read in.
+
+The system of units is described using the value of the 5 basic units
+of any system with respect to the CGS system. Instead of using a unit
+of time we use a unit of velocity as this is more intuitive. Users
+hence need to provide:
+
+* a unit of length: ``UnitLength_in_cgs``,
+* a unit of mass: ``UnitMass_in_cgs``,
+* a unit of velocity ``UnitVelocity_in_cgs``,
+* a unit of electric current ``UnitCurrent_in_cgs``,
+* a unit of temperature ``UnitTemp_in_cgs``.
+
+All these need to be expressed with respect to their cgs counter-part
+(i.e. :math:`cm`, :math:`g`, :math:`cm/s`, :math:`A` and :math:`K`). Recall
+that there are no h-factors in any of SWIFT's quantities; we, for instance,
+use :math:`cm` and not :math:`cm/h`.
+
+For instance to use the commonly adopted system of 10^10 Msun as a
+unit for mass, mega-parsec as a unit of length and km/s as a unit of
+speed, we would use:
+
+.. code:: YAML
+
+   # Common unit system for cosmo sims
+   InternalUnitSystem:
+     UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+     UnitLength_in_cgs:   3.08567758e24 # 1 Mpc in centimeters
+     UnitVelocity_in_cgs: 1e5           # 1 km/s in centimeters per second
+     UnitCurrent_in_cgs:  1             # 1 Ampere
+     UnitTemp_in_cgs:     1             # 1 Kelvin
+
+Note that there are currently no variables in any of the SWIFT physics
+schemes that make use of the unit of electric current. There is also
+no incentive to use anything else than Kelvin but that makes the whole
+system consistent with any possible unit system.
+
+If one is interested in using the more humorous `FFF unit
+system <https://en.wikipedia.org/wiki/FFF_system>`_ one would use
+
+.. code:: YAML
+
+   # FFF unit system
+   InternalUnitSystem:
+     UnitMass_in_cgs:     40823.3133  # 1 Firkin (fir) in grams
+     UnitLength_in_cgs:   20116.8     # 1 Furlong (fur) in cm
+     UnitVelocity_in_cgs: 0.01663095  # 1 Furlong (fur) per Fortnight (ftn) in cm/s
+     UnitCurrent_in_cgs:  1           # 1 Ampere
+     UnitTemp_in_cgs:     1           # 1 Kelvin
+
+The value of the physical constants in this system is left as an
+exercise for the reader [#f1]_.
+
+.. _Parameters_cosmology:
+
+Cosmology
+---------
+
+When running a cosmological simulation, the section ``Cosmology`` sets the values of the
+cosmological model. The expanded :math:`\Lambda\rm{CDM}` parameters governing the
+background evolution of the Universe need to be specified here. These are:
+
+* The reduced Hubble constant: :math:`h`: ``h``,
+* The matter density parameter :math:`\Omega_m`: ``Omega_m``,
+* The cosmological constant density parameter :math:`\Omega_\Lambda`: ``Omega_lambda``,
+* The baryon density parameter :math:`\Omega_b`: ``Omega_b``,
+* The radiation density parameter :math:`\Omega_r`: ``Omega_r``.
+
+The last parameter can be omitted and will default to :math:`\Omega_r = 0`. Note
+that SWIFT will verify on start-up that the matter content of the initial conditions
+matches the cosmology specified in this section.
+
+This section also specifies the start and end of the simulation expressed in
+terms of scale-factors. The two parameters are:
+
+* Initial scale-factor: ``a_begin``,
+* Final scale-factor: ``a_end``.
+
+Two additional optional parameters can be used to change the equation of
+state of dark energy :math:`w(a)`. We use the evolution law :math:`w(a) =
+w_0 + w_a (1 - a)`. The two parameters in the YAML file are:
+
+* The :math:`z=0` dark energy equation of state parameter :math:`w_0`: ``w_0``
+* The dark energy equation of state evolution parameter :math:`w_a`: ``w_a``
+
+If unspecified these parameters default to the default
+:math:`\Lambda\rm{CDM}` values of :math:`w_0 = -1` and :math:`w_a = 0`.
+
+For a Planck+13 cosmological model (ignoring radiation density as is
+commonly done) and running from :math:`z=127` to :math:`z=0`, one would hence
+use the following parameters:
+
+.. code:: YAML
+
+   Cosmology:
+     a_begin:        0.0078125     # z = 127
+     a_end:          1.0           # z = 0
+     h:              0.6777
+     Omega_m:        0.307
+     Omega_lambda:   0.693
+     Omega_b:        0.0455
+     Omega_r:        0.            # (Optional)
+     w_0:            -1.0          # (Optional)
+     w_a:            0.            # (Optional)
+
+When running a non-cosmological simulation (i.e. without the ``-c`` run-time
+flag) this section of the YAML file is entirely ignored.
+
+.. _Parameters_gravity:
+
+Gravity
+-------
+
+The behaviour of the self-gravity solver can be modified by the parameters
+provided in the ``Gravity`` section. The theory document puts these parameters into the
+context of the equations being solved. We give a brief overview here.
+
+* The Plummer-equivalent co-moving softening length used for all particles :math:`\epsilon_{com}`: ``comoving_softening``,
+* The Plummer-equivalent maximal physical softening length used for all particles :math:`\epsilon_{max}`: ``max_physical_softening``,
+
+At any redshift :math:`z`, the Plummer-equivalent softening length used by the
+code will be :math:`\epsilon=\min(\epsilon_{max},
+\frac{\epsilon_{com}}{z+1})`. This is expressed in internal units.
+
+* The opening angle (multipole acceptance criterion) used in the FMM :math:`\theta`: ``theta``,
+* The time-step size pre-factor :math:`\eta`: ``eta``,
+
+The time-step of a given particle is given by :math:`\Delta t =
+\eta\sqrt{\frac{\epsilon}{|\overrightarrow{a}|}}`, where
+:math:`\overrightarrow{a}` is the particle's acceleration. `Power et al. (2003) <http://adsabs.harvard.edu/abs/2003MNRAS.338...14P>`_ recommend using :math:`\eta=0.025`.
+The last tree-related parameter is
+
+* The tree rebuild frequency: ``rebuild_frequency``.
+
+The tree rebuild frequency is an optional parameter defaulting to
+:math:`0.01`. It is used to trigger the re-construction of the tree every time a
+fraction of the particles have been integrated (kicked) forward in time.
+
+Simulations using periodic boundary conditions use additional parameters for the
+Particle-Mesh part of the calculation. The last three are optional:
+
+* The number cells along each axis of the mesh :math:`N`: ``mesh_side_length``,
+* The mesh smoothing scale in units of the mesh cell-size :math:`a_{\rm
+  smooth}`: ``a_smooth`` (default: ``1.25``),
+* The scale above which the short-range forces are assumed to be 0 (in units of
+  the mesh cell-size multiplied by :math:`a_{\rm smooth}`) :math:`r_{\rm
+  cut,max}`: ``r_cut_max`` (default: ``4.5``),
+* The scale below which the short-range forces are assumed to be exactly Newtonian (in units of
+  the mesh cell-size multiplied by :math:`a_{\rm smooth}`) :math:`r_{\rm
+  cut,min}`: ``r_cut_min`` (default: ``0.1``),
+
+For most runs, the default values can be used. Only the number of cells along
+each axis needs to be specified. The remaining three values are best described
+in the context of the full set of equations in the theory documents.
+
+As a summary, here are the values used for the EAGLE :math:`100^3~{\rm Mpc}^3`
+simulation:
+
+.. code:: YAML
+
+   # Parameters for the self-gravity scheme for the EAGLE-100 box
+   Gravity:
+     eta:          0.025
+     theta:        0.7
+     comoving_softening:     0.0026994  # 0.7 proper kpc at z=2.8.
+     max_physical_softening: 0.0007     # 0.7 proper kpc
+     rebuild_frequency:      0.01       # Default optional value
+     mesh_side_length:       512
+     a_smooth:     1.25                 # Default optional value
+     r_cut_max:    4.5                  # Default optional value
+     r_cut_min:    0.1                  # Default optional value
+
+
+.. _Parameters_SPH:
+
+SPH
+---
+
+.. _Parameters_time_integration:
+
+Time Integration
+----------------
+
+The ``TimeIntegration`` section is used to set some general parameters related to time
+integration. In all cases, users have to provide a minimal and maximal time-step
+size:
+
+* Maximal time-step size: ``dt_max``
+* Minimal time-step size: ``dt_min``
+
+These quantities are expressed in internal units. All particles will have their
+time-step limited by the maximal value on top of all the other criteria that may
+apply to them (gravity acceleration, Courant condition, etc.). If a particle
+demands a time-step size smaller than the minimum, SWIFT will abort with an
+error message. This is a safe-guard against simulations that would never
+complete due to the number of steps to run being too large. Note that in
+cosmological runs, the meaning of these variables changes slightly. They do not
+correspond to differences in time but in logarithm of the scale-factor. For
+these runs, the simulation progresses in jumps of
+:math:`\Delta\log(a)`. ``dt_max`` is then the maximally allowed change in
+:math:`\Delta\log(a)` allowed for any particle in the simulation. This behaviour
+mimics the variables of the same name in the Gadget code.
+
+When running a non-cosmological simulation, the user also has to provide the
+time of the start and the time of the end of the simulation:
+
+* Start time: ``time_begin``
+* End time: ``time_end``
+
+Both are expressed in internal units. The start time is typically set to ``0``
+but SWIFT can handle any value here. For cosmological runs, these values are
+ignored and the start- and end-points of the runs are specified by the start and
+end scale-factors in the cosmology section of the parameter file.
+
+Additionally, when running a cosmological volume, advanced users can specify the
+value of the dimensionless pre-factor entering the time-step condition linked
+with the motion of particles with respect to the background expansion and mesh
+size. See the theory document for the exact equations.
+
+* Dimensionless pre-factor of the maximal allowed displacement:
+  ``max_dt_RMS_factor`` (default: ``0.25``)
+
+This value rarely needs altering.
+
+A full time-step section for a non-cosmological run would be:
+
+.. code:: YAML
+
+  TimeIntegration:
+    time_begin:   0    # Start time in internal units.
+    time_end:     10.  # End time in internal units.
+    dt_max:       1e-2
+    dt_min:       1e-6
+
+Whilst for a cosmological run, one would need:
+
+.. code:: YAML
+
+  TimeIntegration:
+    dt_max:            1e-4
+    dt_min:            1e-10
+    max_dt_RMS_factor: 0.25     # Default optional value
+
+.. _Parameters_ICs:
+
+Initial Conditions
+------------------
+
+The ``InitialConditions`` section of the parameter file contains all the options related to
+the initial conditions. The main two parameters are
+
+* The name of the initial conditions file: ``file_name``,
+* Whether the problem uses periodic boundary conditions or not: ``periodic``.
+
+The file path is relative to where the code is being executed. These
+parameters can be complemented by some optional values to drive some
+specific behaviour of the code.
+
+* Whether to generate gas particles from the DM particles: ``generate_gas_in_ics`` (default: ``0``),
+* Whether to activate an additional clean-up of the SPH smoothing lengths: ``cleanup_smoothing_lengths`` (default: ``0``)
+
+The procedure used to generate gas particles from the DM ones is
+outlined in the theory documents and is too long for a full
+description here.  The cleaning of the smoothing lengths is an
+expensive operation but can be necessary in the cases where the
+initial conditions are of poor quality and the values of the smoothing
+lengths are far from the values they should have.
+
+When starting from initial conditions created for Gadget, some
+additional flags can be used to convert the values from h-full to
+h-free and remove the additional :math:`\sqrt{a}` in the velocities:
+
+* Whether to re-scale all the fields to remove powers of h from the quantities: ``cleanup_h_factors`` (default: ``0``),
+* Whether to re-scale the velocities to remove the :math:`\sqrt{a}` assumed by Gadget : ``cleanup_velocity_factors`` (default: ``0``).
+
+The h-factors are self-consistently removed according to their units
+and this is applied to all the quantities irrespective of particle
+types. The correct power of ``h`` is always calculated for each
+quantity.
+
+Finally, SWIFT also offers these options:
+
+* A factor to re-scale all the smoothing-lengths by a fixed amount: ``smoothing_length_scaling`` (default: ``1.``),
+* A shift to apply to all the particles: ``shift`` (default: ``[0.0,0.0,0.0]``),
+* Whether to replicate the box along each axis: ``replicate`` (default: ``1``).
+
+The shift is expressed in internal units. The option to replicate the
+box is especially useful for weak-scaling tests. When set to an
+integer >1, the box size is multiplied by this integer along each axis
+and the particles are duplicated and shifted such as to create exact
+copies of the simulation volume.
+
+The full section to start a DM+hydro run from Gadget DM-only ICs would
+be:
+
+.. code:: YAML
+
+   InitialConditions:
+     file_name:  my_ics.hdf5
+     periodic:                    1
+     cleanup_h_factors:           1
+     cleanup_velocity_factors:    1
+     generate_gas_in_ics:         1
+     cleanup_smoothing_lengths:   1
+
+
+.. _Parameters_constants:
+
+Physical Constants
+------------------
+
+For some idealised test it can be useful to overwrite the value of
+some physical constants; in particular the value of the gravitational
+constant. SWIFT offers an optional parameter to overwrite the value of
+:math:`G_N`.
+
+.. code:: YAML
+
+   PhysicalConstants:
+     G:   1
+
+Note that this sets :math:`G` to the specified value in the internal system
+of units. Setting a value of `1` when using the system of units (10^10 Msun,
+Mpc, km/s) will mean that :math:`G_N=1` in these units [#f2]_ instead of the
+normal value :math:`G_N=43.00927`.
+
+This option is only used for specific tests and debugging. This entire
+section of the YAML file can typically be left out. More constants may
+be handled in the same way in future versions.
+
+.. _Parameters_snapshots:
+
+Snapshots
+---------
+
+The ``Snapshots`` section of the parameter file contains all the options related to
+the dump of simulation outputs in the form of HDF5 :ref:`snapshots`. The main
+parameter is the base name that will be used for all the outputs in the run:
+
+* The base name of the HDF5 snapshots: ``basename``.
+
+This name will then be appended by an under-score and 4 digits followed by
+``.hdf5`` (e.g. ``base_name_1234.hdf5``). The 4 digits are used to label the
+different outputs, starting at ``0000``. In the default setup the digits simply
+increase by one for each snapshot. However, if the optional parameter
+``int_time_label_on`` is switched on, then we use 6 digits and these will be the
+physical time of the simulation rounded to the nearest integer
+(e.g. ``base_name_001234.hdf5``) [#f3]_.
+
+The time of the first snapshot is controlled by the two following options:
+
+* Time of the first snapshot (non-cosmological runs): ``time_first``,
+* Scale-factor of the first snapshot (cosmological runs): ``scale_factor_first``.
+
+One of those two parameters has to be provided depending on the type of run. In
+the case of non-cosmological runs, the time of the first snapshot is expressed
+in the internal units of time. Users also have to provide the difference in time
+(or scale-factor) between consecutive outputs:
+
+* Time difference between consecutive outputs: ``delta_time``.
+
+In non-cosmological runs this is also expressed in internal units. For
+cosmological runs, this value is *multiplied* to obtain the
+scale-factor of the next snapshot. This implies that the outputs are
+equally spaced in :math:`\log(a)` (See :ref:`Output_list_label` to have
+snapshots not regularly spaced in time).
+
+When running the code with structure finding activated, it is often
+useful to have a structure catalog written at the same simulation time
+as the snapshots. To activate this, the following parameter can be
+switched on:
+
+* Run VELOCIraptor every time a snapshot is dumped: ``invoke_stf``
+  (default: ``0``).
+
+This produces catalogs using the options specified for the stand-alone
+VELOCIraptor outputs (see the section :ref:`Parameters_structure_finding`) but
+with a base name and output number that matches the snapshot name
+(e.g. ``stf_base_name_1234.hdf5``) irrespective of the name specified in the
+section dedicated to VELOCIraptor. Note that the invocation of VELOCIraptor at
+every dump is done additionally to the stand-alone dumps that can be specified
+in the corresponding section of the YAML parameter file.
+
+Users can optionally specify the level of compression used by the HDF5 library
+using the parameter:
+
+* GZIP compression level of the HDF5 arrays: ``compression`` (default: ``0``).
+
+The default level of ``0`` implies no compression and values have to be in the
+range :math:`[0-9]`. This integer is passed to the i/o library and used for the
+loss-less GZIP compression algorithm. Higher values imply higher compression but
+also more time spent deflating and inflating the data. Note that up until HDF5
+1.10.x this option is not available when using the MPI-parallel version of the
+i/o routines.
+
+Finally, it is possible to specify a different system of units for the snapshots
+than the one that was used internally by SWIFT. The format is identical to the
+one described above (See the :ref:`Parameters_units` section) and read:
+
+* a unit of length: ``UnitLength_in_cgs`` (default: ``InternalUnitSystem:UnitLength_in_cgs``),
+* a unit of mass: ``UnitMass_in_cgs`` (default: ``InternalUnitSystem:UnitMass_in_cgs``),
+* a unit of velocity ``UnitVelocity_in_cgs`` (default: ``InternalUnitSystem:UnitVelocity_in_cgs``),
+* a unit of electric current ``UnitCurrent_in_cgs`` (default: ``InternalUnitSystem:UnitCurrent_in_cgs``),
+* a unit of temperature ``UnitTemp_in_cgs`` (default: ``InternalUnitSystem:UnitTemp_in_cgs``).
+
+When un-specified, these all take the same value as assumed by the internal
+system of units. These are rarely used but can offer a practical alternative to
+converting data in the post-processing of the simulations.
+
+For a standard cosmological run with structure finding activated, the
+full section would be:
+
+.. code:: YAML
+
+   Snapshots:
+     basename:            output
+     scale_factor_first:  0.02    # z = 49
+     delta_time:          1.02
+     invoke_stf:          1
+
+Showing all the parameters for a basic hydro test-case, one would have:
+
+.. code:: YAML
+
+   Snapshots:
+     basename:            sedov
+     time_first:          0.01
+     delta_time:          0.005
+     invoke_stf:          0
+     int_time_label_on:   0
+     compression:         3
+     UnitLength_in_cgs:   1.  # Use cm in outputs
+     UnitMass_in_cgs:     1.  # Use grams in outputs
+     UnitVelocity_in_cgs: 1.  # Use cm/s in outputs
+     UnitCurrent_in_cgs:  1.  # Use Ampere in outputs
+     UnitTemp_in_cgs:     1.  # Use Kelvin in outputs
+
+Some additional specific options for the snapshot outputs are described in the
+following pages:
+
+* :ref:`Output_list_label` (to have snapshots not evenly spaced in time),
+* :ref:`Output_selection_label` (to select what particle fields to write).
+
+
+.. _Parameters_statistics:
+
+Statistics
+----------
+
+Some additional specific options for the statistics outputs are described in the
+following page:
+
+* :ref:`Output_list_label` (to have statistics outputs not evenly spaced in time).
+
+.. _Parameters_restarts:
+
+Restarts
+--------
+
+SWIFT can write check-pointing files and restart from them. The behaviour of
+this mechanism is driven by the options in the ``Restarts`` section of the YAML
+parameter file. All the parameters are optional but default to values that
+ensure a reasonable behaviour.
+
+* Whether or not to enable the dump of restart files: ``enable`` (default:
+  ``1``).
+
+This parameter acts as a master-switch for the check-pointing capabilities. All the
+other options require the ``enable`` parameter to be set to ``1``.
+
+* Whether or not to save a copy of the previous set of check-pointing files:
+  ``save`` (default: ``1``),
+* Whether or not to dump a set of restart file on regular exit: ``onexit``
+  (default: ``0``),
+* The wall-clock time in hours between two sets of restart files:
+  ``delta_hours`` (default: ``6.0``).
+
+Note that there is no buffer time added to the ``delta_hours`` value. If the
+system's batch queue run time limit is set to 6 hours, the user must specify a
+smaller value to allow for enough time to safely dump the check-point files.
+
+* The sub-directory in which to store the restart files: ``subdir`` (default:
+  ``restart``),
+* The basename of the restart files: ``basename`` (default: ``swift``)
+
+If the directory does not exist, SWIFT will create it.  When resuming a run,
+SWIFT, will look for files with the name provided in the sub-directory specified
+here. The files themselves are named ``basename_000001.rst`` where the basename
+is replaced by the user-specified name and the 6-digits number corresponds to
+the MPI-rank. SWIFT writes one file per MPI rank. If the ``save`` option has
+been activated, the previous set of restart files will be named
+``basename_000000.rst.prev``.
+
+SWIFT can also be stopped by creating an empty file called ``stop`` in the
+directory where the code runs. This will make SWIFT dump a fresh set of restart
+files (irrespective of the specified ``delta_hours`` between dumps) and exit
+cleanly. One parameter governs this behaviour:
+
+* Number of steps between two checks for the presence of a ``stop`` file:
+  ``stop_steps`` (default: ``100``).
+
+The default value is chosen such that SWIFT does not need to poll the
+file-system too often, which can take a significant amount of time on distributed
+systems. For runs where the small time-steps take a much larger amount of time,
+a smaller value is recommended to allow for a finer control over when the code
+can be stopped.
+
+Finally, SWIFT can automatically stop after a specified amount of wall-clock
+time. The code can also run a command when exiting in this fashion, which can be
+used, for instance, to interact with the batch queue system:
+
+* Maximal wall-clock run time in hours: ``max_run_time`` (default: ``24.0``),
+* Whether or not to run a command on exit: ``resubmit_on_exit`` (default:
+  ``0``),
+* The command to run on exit: ``resubmit_command`` (default: ``./resub.sh``).
+
+Note that no check is performed on the validity of the command to run. SWIFT
+simply calls ``system()`` with the user-specified command.
+
+To run SWIFT, dumping check-pointing files every 6 hours and running for 24
+hours after which a shell command will be run, one would use:
+
+.. code:: YAML
+
+  Restarts:
+    enable:             1
+    save:               1          # Keep copies
+    onexit:             0
+    subdir:             restart    # Sub-directory of the directory where SWIFT is run
+    basename:           swift
+    delta_hours:        6.0
+    stop_steps:         100
+    max_run_time:       24.0       # In hours
+    resubmit_on_exit:   1
+    resubmit_command:   ./resub.sh
+
+.. _Parameters_scheduler:
+
+Scheduler
+---------
+
+The Scheduler section contains various parameters that control how the cell
+tree is configured and defines some values for the related tasks.  In general
+these should be considered as tuning parameters, both for speed and memory
+use.
+
+.. code:: YAML
+
+   nr_queues: 0
+
+Defines the number of task queues used. These are normally set to one per
+thread and should be at least that number.
+
+A number of parameters decide how the cell tree will be split into sub-cells,
+according to the number of particles and their expected interaction count,
+and the type of interaction. These are:
+
+.. code:: YAML
+
+  cell_max_size:             8000000
+  cell_sub_size_pair_hydro:  256000000
+  cell_sub_size_self_hydro:  32000
+  cell_sub_size_pair_grav:   256000000
+  cell_sub_size_self_grav:   32000
+  cell_sub_size_pair_stars:  256000000
+  cell_sub_size_self_stars:  32000
+  cell_split_size:           400
+
+When possible, cells that exceed these constraints will be split into a further
+level of sub-cells. So for instance a sub-cell should not contain more than
+400 particles (this number defines the scale of most `N*N` interactions).
+
+To control the number of self-gravity tasks we have the parameter:
+
+.. code:: YAML
+
+  cell_subdepth_diff_grav:   4
+
+which stops these from being done at the scale of the leaf cells, of which
+there can be a large number. In this case cells with gravity tasks must be at
+least 4 levels above the leaf cells (when possible).
+
+Extra space is required when particles are created in the system (to the time
+of the next rebuild). These are controlled by:
+
+.. code:: YAML
+
+  cell_extra_parts:          0
+  cell_extra_gparts:         0
+  cell_extra_sparts:         400
+
+
+The number of top-level cells is controlled by the parameter:
+
+.. code:: YAML
+
+  max_top_level_cells:       12
+
+This is the number per dimension; with 12 we will have 12x12x12 cells. There must be
+at least 3 top-level cells per dimension.
+
+The number of top-level cells should be set so that the number of particles
+per cell is not too large, this is particularly important when using MPI as
+this defines the maximum size of cell exchange and also the size of non-local
+cells (these are used for cell interactions with local cells), which can have
+a large influence on memory use. Best advice for this is to at least scale for
+additional nodes.
+
+The memory used for holding the task and task-link lists needs to be
+pre-allocated, but cannot be pre-calculated, so we have the two parameters:
+
+.. code:: YAML
+
+  tasks_per_cell:            0.0
+  links_per_tasks:           10
+
+which are guesses at the mean numbers of tasks per cell and number of links
+per task. The tasks_per_cell value will be conservatively guessed when set to
+0.0, but you will be able to save memory by setting a value. The way to get a
+better estimate is to run SWIFT with verbose reporting on (``--verbose=1``)
+and check for the lines that report the ``per cell`` or, with MPI, the
+``maximum per cell`` values. This number can vary as the balance between MPI ranks
+does, so it is probably best to leave some head room.
+
+If these are exceeded you should get an obvious error message.
+
+Finally the parameter:
+
+.. code:: YAML
+
+  mpi_message_limit:         4096
+
+Defines the size (in bytes) below which MPI communication will be sent using
+non-buffered calls. These should have lower latency, but how that works or
+is honoured is an implementation question.
+
+
+.. _Parameters_domain_decomposition:
+
+Domain Decomposition:
+---------------------
+
+This section determines how the top-level cells are distributed between the
+ranks of an MPI run. An ideal decomposition should result in each rank having
+a similar amount of work to do, so that all the ranks complete at the same
+time. Achieving a good balance requires that SWIFT is compiled with either the
+ParMETIS or METIS libraries. ParMETIS is an MPI version of METIS, so is
+preferred for performance reasons.
+
+When we use ParMETIS/METIS the top-level cells of the volume are considered as
+a graph, with a cell at each vertex and edges that connect the vertices to all
+the neighbouring cells (so we have 26 edges connected to each vertex).
+Decomposing such a graph into domains is known as partitioning, so in SWIFT we
+refer to domain decomposition as partitioning.
+
+This graph of cells can have weights associated with the vertices and the
+edges. These weights are then used to guide the partitioning, seeking to
+balance the total weight of the vertices and minimize the weights of the edges
+that are cut by the domain boundaries (known as the edgecut). We can consider
+the edge weights as a proxy for the exchange of data between cells, so
+minimizing this reduces communication.
+
+The Initial Partition:
+^^^^^^^^^^^^^^^^^^^^^^
+
+When SWIFT first starts it reads the initial conditions and then does an
+initial distribution of the top-level cells. At this time the only information
+available is the cell structure and, by geometry, the particles each cell
+should contain. The type of partitioning attempted is controlled by the::
+
+  DomainDecomposition:
+    initial_type:
+
+parameter. Which can have the values *memory*, *region*, *grid* or
+*vectorized*:
+
+
+    * *memory*
+
+    This is the default if METIS or ParMETIS is available. It performs a
+    partition based on the memory use of all the particles in each cell,
+    attempting to equalize the memory used by all the ranks.
+    How successful this attempt is depends on the granularity of cells and particles
+    and the number of ranks, clearly if most of the particles are in one cell,
+    or a small region of the volume, balance is impossible or
+    difficult. Having more top-level cells makes it easier to calculate a
+    good distribution (but this comes at the cost of greater overheads).
+
+    * *region*
+
+    The one other METIS/ParMETIS option is "region". This attempts to assign equal
+    numbers of cells to each rank, with the surface area of the regions minimised
+    (so we get blobs, rather than rectangular volumes of cells).
+
+If ParMETIS and METIS are not available two other options are possible, but
+will give a poorer partition:
+
+    * *grid*
+
+    Split the cells into a number of axis aligned regions. The number of
+    splits per axis is controlled by the::
+
+       initial_grid
+
+    parameter. It takes an array of three values. The product of these values
+    must equal the number of MPI ranks. If not set a suitable default will be used.
+
+    * *vectorized*
+
+    Allocate the cells on the basis of proximity to a set of seed
+    positions. The seed positions are picked every nranks along a vectorized
+    cell list (1D representation). This is guaranteed to give an initial
+    partition for all cases when the number of cells is greater than or equal
+    to the number of MPI ranks, so can be used if the others fail. Don't use this.
+
+If ParMETIS and METIS are not available then only an initial partition will be
+performed. So the balance will be compromised by the quality of the initial
+partition.
+
+Repartitioning:
+^^^^^^^^^^^^^^^
+
+When ParMETIS or METIS is available we can consider adjusting the balance
+during the run, so we can improve from the initial partition and also track
+changes in the run that require a different balance. The initial partition is
+usually not optimal as although it may have balanced the distribution of
+particles it has not taken account of the fact that different particles types
+require differing amounts of processing and we have not considered that we
+also need to do work requiring communication between cells. This latter point
+is important as we are running an MPI job, as inter-cell communication may be
+very expensive.
+
+There are a number of possible repartition strategies which are defined using
+the::
+
+  DomainDecomposition:
+    repartition_type:
+
+parameter. The possible values for this are *none*, *fullcosts*, *edgecosts*,
+*memory*, *timecosts*.
+
+    * *none*
+
+    Rather obviously, don't repartition. You are happy to run with the
+    initial partition.
+
+    * *fullcosts*
+
+    Use computation weights derived from the running tasks for the vertex and
+    edge weights. This is the default.
+
+    * *edgecosts*
+
+    Only use computation weights derived from the running tasks for the edge
+    weights.
+
+    * *memory*
+
+    Repeat the initial partition with the current particle positions
+    re-balancing the memory use.
+
+    * *timecosts*
+
+    Only use computation weights derived from the running tasks for the vertex
+    weights and the expected time the particles will interact in the cells as
+    the edge weights. Using time as the edge weight has the effect of keeping
+    very active cells on single MPI ranks, so can reduce MPI communication.
+
+The computation weights are actually the measured times, in CPU ticks, that
+tasks associated with a cell take. So these automatically reflect the relative
+cost of the different task types (SPH, self-gravity etc.), and other factors
+like how well they run on the current hardware and are optimized by the
+compiler used, but this means that we have a constraint on how often we can
+consider repartitioning, namely when all (or nearly all) the tasks of the
+system have been invoked in a step. To control this we have the::
+
+    minfrac:     0.9
+
+parameter. Which defines the minimum fraction of all the particles in the
+simulation that must have been actively updated in the last step, before
+repartitioning is considered.
+
+That then leaves the question of when a run is considered to be out of balance
+and should benefit from a repartition. That is controlled by the::
+
+    trigger:          0.05
+
+parameter. This value is the CPU time difference between MPI ranks, as a
+fraction, if less than this value a repartition will not be
+done. Repartitioning can be expensive not just in CPU time, but also because
+large numbers of particles can be exchanged between MPI ranks, so is best
+avoided.
+
+If you are using ParMETIS there are additional ways that you can tune the
+repartition process.
+
+METIS only offers the ability to create a partition from a graph, which means
+that each solution is independent of those that have already been made, that
+can make the exchange of particles very large (although SWIFT attempts to
+minimize this), however, using ParMETIS we can use the existing partition to
+inform the new partition, this has two algorithms that are controlled using::
+
+    adaptive:         1
+
+which means use adaptive repartition, otherwise simple refinement. The
+adaptive algorithm is further controlled by the::
+
+    itr:              100
+
+parameter, which defines the ratio of inter node communication time to data
+redistribution time, in the range 0.00001 to 10000000.0. Lower values give
+less data movement during redistributions. The best choice for these can only
+be determined by experimentation (the gains are usually small, so not really
+recommended).
+
+Finally we have the parameter::
+
+    usemetis:         0
+
+Forces the use of the METIS API, probably only useful for developers.
+
+**Fixed cost repartitioning:**
+
+So far we have assumed that repartitioning will only happen after a step that
+meets the `minfrac:` and `trigger:` criteria, but we may want to repartition
+at some arbitrary steps, and indeed do better than the initial partition
+earlier in the run. This can be done using *fixed cost* repartitioning.
+
+Fixed costs are output during each repartition step into the file
+`partition_fixed_costs.h`; this should be created by a test run of your
+full simulation (possibly with a smaller volume, but with all the physics
+enabled). This file can then be used to replace the same file found in the
+`src/` directory and SWIFT should then be recompiled. Once you have that, you
+can use the parameter::
+
+    use_fixed_costs:  1
+
+to control whether they are used or not. If enabled these will be used to
+repartition after the second step, which will generally give as good a
+repartition immediately as you get at the first unforced repartition.
+
+Also once these have been enabled you can change the `trigger:` value to
+numbers greater than 2, and repartitioning will be forced every `trigger`
+steps. This latter option is probably only useful for developers, but tuning
+the second step to use fixed costs can give some improvements.
+
+.. _Parameters_structure_finding:
+
+Structure finding (VELOCIraptor)
+--------------------------------
+
+
+.. [#f1] The thorough reader (or overly keen SWIFT tester) would find  that the speed of light is :math:`c=1.8026\times10^{12}\,\rm{fur}\,\rm{ftn}^{-1}`, Newton's constant becomes :math:`G_N=4.896735\times10^{-4}~\rm{fur}^3\,\rm{fir}^{-1}\,\rm{ftn}^{-2}` and Planck's constant turns into :math:`h=4.851453\times 10^{-34}~\rm{fur}^2\,\rm{fir}\,\rm{ftn}^{-1}`.
+
+
+.. [#f2] which would translate into a constant :math:`G_N=1.5517771\times10^{-9}~cm^{3}\,g^{-1}\,s^{-2}` if expressed in the CGS system.
+
+.. [#f3] This feature only makes sense for non-cosmological runs for which the
+         internal time unit is such that when rounded to the nearest integer a
+	 sensible number is obtained. A use-case for this feature would be to
+	 compare runs over the same physical time but with different numbers of
+	 snapshots. Snapshots at a given time would always have the same set of
+	 digits irrespective of the number of snapshots produced before.
+
diff --git a/doc/RTD/source/Snapshots/index.rst b/doc/RTD/source/Snapshots/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..30cdc0e1281ae0420b44d88001992ccbbe588136
--- /dev/null
+++ b/doc/RTD/source/Snapshots/index.rst
@@ -0,0 +1,199 @@
+.. Snapshots
+   Matthieu Schaller, 5th January 2019
+
+.. _snapshots:
+
+Snapshots
+=========
+
+The snapshots are stored using the HDF5 format and are almost compatible with
+Gadget-2 (fully compatible outside of cosmological runs). They do, however,
+contain a large set of extensions including units, meta-data about the code and
+runs as well as facilities to quickly access the particles in a specific region
+of the simulation volume.
+
+Header
+------
+
+Meta-data about the code and run
+--------------------------------
+
+Several groups at the root of the files only contain attributes and are used to
+store some meta-data about the simulation and the code itself.
+
+Code
+~~~~
+
+The group ``/Code`` contains basic information about the version of the code
+that was used to run the simulation that dumped this snapshot. Versions of the
+libraries used to compile the code as well as information about the compiler and
+the flags used are stored. The most important element here are the git SHA and
+configuration parameters of the code. Alongside the compiler flags, policies and
+used parameters, these allow to reproduce exactly an older run.
+
+Cosmology
+~~~~~~~~~
+
+The group ``/Cosmology`` contains information about the cosmological model used
+for this simulation. The first important field is the attribute ``Cosmological
+run`` which is set to ``1`` for cosmological runs and to ``0`` otherwise. This
+allows users to quickly distinguish between these two main modes. Most values in
+this section only make sense for cosmological runs.
+
+All quantities are expressed in the internal system of units (note that this may
+differ from the units used in the particle arrays). Values like the look-back
+time are given for the redshift (or scale-factor) of this snapshot.
+
+Policy
+~~~~~~
+
+The group ``/Policy`` list the engine policies (defined in ``src/engine.h``)
+that were activated in the run that dumped this snapshot. The policies roughly
+translate to the main run-time parameters of SWIFT.
+
+GravityScheme
+~~~~~~~~~~~~~
+
+HydroScheme
+~~~~~~~~~~~
+
+StarsScheme
+~~~~~~~~~~~
+
+SubgridScheme
+~~~~~~~~~~~~~
+
+Unit systems
+------------
+
+The snapshots contain *two* groups at the root containing information about the
+unit systems used in the snapshots.
+
+The main one ``Units`` contains the units used in the snapshot. In a similar
+fashion to what is done for the parameter files (see :ref:`Parameters_units`),
+SWIFT specifies only the basic units. These are the units of mass (``U_M``),
+length (``U_L``), time (``U_t``), electric current (``U_I``) and temperature
+(``U_T``). These are specified in units of their CGS equivalents (gram,
+centimeter, second, Ampere, Kelvin). All the quantities present in the particle
+arrays are expressed in this system of units. For each quantity, SWIFT gives the
+conversion factor in terms of these units. For instance, the internal energy per
+unit mass would be expressed as ``U_L^2 U_t^-2``, which in the CGS unit system
+translates to :math:`cm/s^2 = erg/g`.
+
+The second group ``InternalCodeUnits`` contains the unit system that was used
+internally by the code when running the simulation. This is in most cases the
+same system as given in ``Units`` but since users can specify a different
+system for the snapshots, there might be cases where they differ. As this system
+only relates to what was used inside the code and not in the snapshots
+themselves, this group is mostly here to report on the code's run-time behaviour
+and is used to express all the quantities in the meta-data (e.g. in the
+cosmology group or the softening lengths in the gravity group).
+
+Used and unused run-time parameters
+-----------------------------------
+
+The groups ``/Parameters`` and ``/UnusedParameters`` located at the root of the file
+contain the list of all the run-time parameters used by the run with their
+values and the list of parameters that were in the YAML but were not read. The
+content of these two groups is identical to the ``used_parameters.yml`` and
+``unused_parameters.yml`` files produced by SWIFT when starting a run (See
+the :ref:`Parameters_basics` section of the documentation).
+
+Structure of the particle arrays
+--------------------------------
+
+There are several groups that contain 'auxiliary' information, such as
+``Header``.  Particle data is placed in separate groups depending on the type of
+the particles. The types use the naming convention of Gadget-2 (with
+the OWLS and EAGLE extensions).
+
++---------------------+------------------------+----------------------------+
+| HDF5 Group Name     | Physical Particle Type | In code ``enum part_type`` |
++=====================+========================+============================+
+| ``/PartType0/``     | Gas                    | ``swift_type_gas``         |
++---------------------+------------------------+----------------------------+
+| ``/PartType1/``     | Dark Matter            | ``swift_type_dark_matter`` |
++---------------------+------------------------+----------------------------+
+| ``/PartType4/``     | Stars                  | ``swift_type_star``        |
++---------------------+------------------------+----------------------------+
+| ``/PartType5/``     | Black Holes            | ``swift_type_black_hole``  |
++---------------------+------------------------+----------------------------+
+
+The last column in the table gives the ``enum`` value from ``part_type.h``
+corresponding to a given entry in the files.
+
+Quick access to particles via hash-tables
+-----------------------------------------
+
+The particles are not sorted in a specific order when they are written to the
+snapshots. However, the particles are sorted into the top-level cell structure
+used internally by the code every time a tree rebuild is triggered. The
+top-level cells are a coarse-grained mesh but knowing which particle belongs to
+which cell can nevertheless be useful to rapidly access particles in a given
+region only.
+
+One important caveat is that particles are free to drift out of their cells
+between rebuilds of the tree (but not by more than one cell-length). If one
+wants to have all the particles in a given cell, one has to read all the
+neighbouring cells as well. We note that for image making purposes, for instance
+to generate a slice, this is typically not necessary and reading just the cells
+of interest is sufficient.
+
+At the root of the HDF5 file, the ``Cells`` group contains all the relevant
+information. The dimension of the top-level grid (a triplet of integers) is
+given by the attribute ``Cells/Meta-data/dimension`` and the size of each cell (a
+triplet of floating-point numbers) is given by the attribute
+``Cells/Meta-data/size``. All the cells have the same size but for non-cubic
+simulation volumes the cells themselves can have different sizes along each
+axis.
+
+The ``/Cells/Centres`` array gives the centre of each of the top-level cells in the
+simulation volume. Both the cell sizes and positions of the centres are
+expressed in the unit system used for the snapshots (see above) and are hence
+consistent with the particle positions themselves.
+
+Once the cell(s) containing the region of interest has been located, users can
+use the ``/Cells/Counts/PartTypeN`` and
+``/Cells/Offsets/PartTypeN`` arrays to retrieve the location of the particles
+of type ``N`` in the ``/PartTypeN`` arrays. For instance, if one is interested
+in retrieving all the densities of the gas particles in the cell around the
+position `[1, 1, 1]` one could use a piece of code similar to:
+
+.. code-block:: python
+   :linenos:
+
+   import numpy as np
+   import h5py
+
+   snapshot_file = h5py.File("snapshot.hdf5", "r")
+
+   my_pos = [1, 1, 1]
+
+   # Read in the cell centres and size
+   nr_cells = snapshot_file["/Cells/Meta-data"].attrs["nr_cells"]
+   centres = snapshot_file["/Cells/Centres"][:,:]
+   size = snapshot_file["/Cells/Meta-data"].attrs["size"]
+   half_size = size / 2.
+
+   # Look for the cell containing the position of interest
+   my_cell = -1
+   for i in range(nr_cells):
+      if my_pos[0] > centres[i, 0] - half_size[0] and my_pos[0] < centres[i, 0] + half_size[0] and \
+         my_pos[1] > centres[i, 1] - half_size[1] and my_pos[1] < centres[i, 1] + half_size[1] and \
+         my_pos[2] > centres[i, 2] - half_size[2] and my_pos[2] < centres[i, 2] + half_size[2]:
+         my_cell = i
+         break
+   
+   # Print the position of the centre of the cell of interest
+   centre = snapshot_file["/Cells/Centres"][my_cell, :]
+   print("Centre of the cell:", centre)
+
+   # Retrieve the offset and counts
+   my_offset = snapshot_file["/Cells/Offsets/PartType0"][my_cell]
+   my_count = snapshot_file["/Cells/Counts/PartType0"][my_cell]
+
+   # Get the densities of the particles in this cell
+   rho = snapshot_file["/PartType0/Density"][my_offset:my_offset + my_count]
+
+For large simulations, this vastly reduces the amount of data that needs to be read
+from the disk.
diff --git a/doc/RTD/source/SubgridModels/Basic/index.rst b/doc/RTD/source/SubgridModels/Basic/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..031a1ed61bb1cae4f916b58b926f5269b10c7057
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/Basic/index.rst
@@ -0,0 +1,51 @@
+.. Basic sub-grid model
+   Matthieu Schaller, 20th December 2018
+
+
+Basic model
+===========
+
+
+Cooling: Analytic models
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently, we have 3 different simple cooling models (const-lambda, const-du
+and Compton). These are all based on analytic formulas and can be used
+to quickly understand how the cooling interacts with the rest of the
+code before moving to more complex models.
+
+Equations
+---------
+
+The first table compares the different analytical cooling while the next ones
+are specific to a given cooling.  The quantities are the internal energy (\\( u
+\\)), the density (\\( \\rho \\)), the element mass fraction (\\( X_i \\)), the
+cooling function (\\( \\Lambda \\)), the proton mass (\\( m_H \\)) and the time
+step condition (\\( t\_\\text{step}\\)).  If not specified otherwise, all
+cooling models contain a temperature floor avoiding negative temperatures.
+
+.. csv-table:: Analytical Cooling
+   :header: "Variable", "Const-Lambda", "Const-du"
+
+   "\\( \\frac{ \\mathrm{d}u }{ \\mathrm{d}t } \\)", "\\( -\\Lambda \\frac{\\rho^2 X_H^2}{\\rho m_H^2} \\)", "const"
+   "\\( \\Delta t\_\\text{max} \\)", "\\( t\_\\text{step} \\frac{u}{\\left|\\frac{ \\mathrm{d}u }{ \\mathrm{d}t }\\right|} \\)", "\\( t\_\\text{step} \\frac{u}{\\ \\left| \\frac{ \\mathrm{d}u }{ \\mathrm{d}t }\\right|} \\)"
+
+TODO: Add description of the parameters and units.
+
+TODO: Add Compton cooling model
+
+How to Implement a New Cooling
+------------------------------
+
+The developer should provide at least one function for:
+ * writing the cooling name in HDF5
+ * cooling a particle
+ * the maximal time step possible
+ * initializing a particle
+ * computing the total energy radiated by a particle
+ * initializing the cooling parameters
+ * printing the cooling type
+
+For implementation details, see ``src/cooling/none/cooling.h``
+
+See :ref:`new_option` for the full list of changes required.
diff --git a/doc/RTD/source/SubgridModels/EAGLE/EAGLE_entropy_floor.svg b/doc/RTD/source/SubgridModels/EAGLE/EAGLE_entropy_floor.svg
new file mode 100644
index 0000000000000000000000000000000000000000..383b074fc947ed072dc50015152ba694b929c611
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/EAGLE/EAGLE_entropy_floor.svg
@@ -0,0 +1,2102 @@
+<?xml version="1.0" encoding="utf-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
+  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Created with matplotlib (http://matplotlib.org/) -->
+<svg height="226pt" version="1.1" viewBox="0 0 226 226" width="226pt" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+ <defs>
+  <style type="text/css">
+*{stroke-linecap:butt;stroke-linejoin:round;}
+  </style>
+ </defs>
+ <g id="figure_1">
+  <g id="patch_1">
+   <path d="M 0 226.8 
+L 226.8 226.8 
+L 226.8 0 
+L 0 0 
+z
+" style="fill:#ffffff;"/>
+  </g>
+  <g id="axes_1">
+   <g id="patch_2">
+    <path d="M 34.02 197.316 
+L 224.532 197.316 
+L 224.532 2.268 
+L 34.02 2.268 
+z
+" style="fill:#ffffff;"/>
+   </g>
+   <g id="PolyCollection_1">
+    <defs>
+     <path d="M 77.714425 -156.365649 
+L 77.714425 -14.805175 
+L 250.907152 -14.805175 
+L 250.907152 -156.365649 
+L 250.907152 -156.365649 
+L 77.714425 -156.365649 
+z
+" id="m8f9d87720a" style="stroke:#e6e6e6;"/>
+    </defs>
+    <g clip-path="url(#p107a2e5a22)">
+     <use style="fill:#e6e6e6;stroke:#e6e6e6;" x="0" xlink:href="#m8f9d87720a" y="226.8"/>
+    </g>
+   </g>
+   <g id="PolyCollection_2">
+    <path clip-path="url(#p107a2e5a22)" d="M 146.991516 49022.756825 
+L 146.991516 85.113175 
+L 250.907152 -12.410825 
+L 250.907152 49022.756825 
+L 250.907152 49022.756825 
+L 146.991516 49022.756825 
+z
+" style="fill:#e6e6e6;stroke:#e6e6e6;"/>
+   </g>
+   <g id="PathCollection_1">
+    <defs>
+     <path d="M 0 1 
+C 0.265203 1 0.51958 0.894634 0.707107 0.707107 
+C 0.894634 0.51958 1 0.265203 1 0 
+C 1 -0.265203 0.894634 -0.51958 0.707107 -0.707107 
+C 0.51958 -0.894634 0.265203 -1 0 -1 
+C -0.265203 -1 -0.51958 -0.894634 -0.707107 -0.707107 
+C -0.894634 -0.51958 -1 -0.265203 -1 0 
+C -1 0.265203 -0.894634 0.51958 -0.707107 0.707107 
+C -0.51958 0.894634 -0.265203 1 0 1 
+z
+" id="me37d9803e9" style="stroke:#000000;"/>
+    </defs>
+    <g clip-path="url(#p107a2e5a22)">
+     <use style="stroke:#000000;" x="77.714425" xlink:href="#me37d9803e9" y="70.434351"/>
+    </g>
+   </g>
+   <g id="PathCollection_2">
+    <g clip-path="url(#p107a2e5a22)">
+     <use style="stroke:#000000;" x="146.991516" xlink:href="#me37d9803e9" y="85.113175"/>
+    </g>
+   </g>
+   <g id="matplotlib.axis_1">
+    <g id="xtick_1">
+     <g id="line2d_1">
+      <defs>
+       <path d="M 0 0 
+L 0 3.5 
+" id="mddab006ef2" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="60.395152" xlink:href="#mddab006ef2" y="197.316"/>
+      </g>
+     </g>
+     <g id="text_1">
+      <!-- $10^{-6}$ -->
+      <defs>
+       <path d="M 29.09375 67.796875 
+L 11.09375 58.703125 
+L 11.09375 57.296875 
+C 12.296875 57.796875 13.40625 58.203125 13.796875 58.40625 
+C 15.59375 59.109375 17.296875 59.5 18.296875 59.5 
+C 20.40625 59.5 21.296875 57.984375 21.296875 54.765625 
+L 21.296875 9.25 
+C 21.296875 5.921875 20.5 3.625 18.90625 2.703125 
+C 17.40625 1.8125 16 1.5 11.796875 1.5 
+L 11.796875 0 
+L 39.40625 0 
+L 39.40625 1.5 
+C 31.5 1.5 29.90625 2.5 29.90625 7.34375 
+L 29.90625 67.609375 
+z
+" id="Nimbus_Roman_No9_L_Regular-49"/>
+       <path d="M 25.40625 68 
+C 19.90625 68 15.703125 66.3125 12 62.8125 
+C 6.203125 57.203125 2.40625 45.703125 2.40625 34 
+C 2.40625 23.109375 5.703125 11.40625 10.40625 5.8125 
+C 14.09375 1.40625 19.203125 -1 25 -1 
+C 30.09375 -1 34.40625 0.703125 38 4.203125 
+C 43.796875 9.703125 47.59375 21.3125 47.59375 33.40625 
+C 47.59375 53.90625 38.5 68 25.40625 68 
+z
+M 25.09375 65.40625 
+C 33.5 65.40625 38 54.109375 38 33.203125 
+C 38 12.3125 33.59375 1.609375 25 1.609375 
+C 16.40625 1.609375 12 12.3125 12 33.109375 
+C 12 54.3125 16.5 65.40625 25.09375 65.40625 
+z
+" id="Nimbus_Roman_No9_L_Regular-48"/>
+       <path d="M 65.90625 23 
+C 67.59375 23 69.40625 23 69.40625 25 
+C 69.40625 27 67.59375 27 65.90625 27 
+L 11.796875 27 
+C 10.09375 27 8.296875 27 8.296875 25 
+C 8.296875 23 10.09375 23 11.796875 23 
+z
+" id="CMSY10-0"/>
+       <path d="M 44.59375 68.609375 
+C 33.203125 67.609375 27.40625 65.703125 20.09375 60.609375 
+C 9.296875 52.90625 3.40625 41.5 3.40625 28.15625 
+C 3.40625 19.5 6.09375 10.75 10.40625 5.78125 
+C 14.203125 1.390625 19.59375 -1 25.796875 -1 
+C 38.203125 -1 46.796875 8.453125 46.796875 22.203125 
+C 46.796875 34.9375 39.5 43 28 43 
+C 23.59375 43 21.5 42.296875 15.203125 38.5 
+C 17.90625 53.609375 29.09375 64.40625 44.796875 67 
+z
+M 24.203125 38.40625 
+C 32.796875 38.40625 37.796875 31.25 37.796875 18.8125 
+C 37.796875 7.875 33.90625 1.8125 26.90625 1.8125 
+C 18.09375 1.8125 12.703125 11.15625 12.703125 26.5625 
+C 12.703125 31.640625 13.5 34.421875 15.5 35.921875 
+C 17.59375 37.5 20.703125 38.40625 24.203125 38.40625 
+z
+" id="Nimbus_Roman_No9_L_Regular-54"/>
+      </defs>
+      <g transform="translate(50.706631 211.234498)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#CMSY10-0"/>
+       <use transform="translate(156.909271 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-54"/>
+      </g>
+     </g>
+    </g>
+    <g id="xtick_2">
+     <g id="line2d_2">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="95.033698" xlink:href="#mddab006ef2" y="197.316"/>
+      </g>
+     </g>
+     <g id="text_2">
+      <!-- $10^{-4}$ -->
+      <defs>
+       <path d="M 47.203125 23.390625 
+L 37 23.390625 
+L 37 68 
+L 32.59375 68 
+L 1.203125 23.390625 
+L 1.203125 17 
+L 29.296875 17 
+L 29.296875 0.5 
+L 37 0.5 
+L 37 17 
+L 47.203125 17 
+z
+M 29.203125 23.390625 
+L 5.203125 23.390625 
+L 29.203125 57.78125 
+z
+" id="Nimbus_Roman_No9_L_Regular-52"/>
+      </defs>
+      <g transform="translate(85.345176 211.234498)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#CMSY10-0"/>
+       <use transform="translate(156.909271 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-52"/>
+      </g>
+     </g>
+    </g>
+    <g id="xtick_3">
+     <g id="line2d_3">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="129.672243" xlink:href="#mddab006ef2" y="197.316"/>
+      </g>
+     </g>
+     <g id="text_3">
+      <!-- $10^{-2}$ -->
+      <defs>
+       <path d="M 47.5 13.671875 
+L 46.203125 14.171875 
+C 42.5 8.5 41.203125 7.59375 36.703125 7.59375 
+L 12.796875 7.59375 
+L 29.59375 25.140625 
+C 38.5 34.40625 42.40625 41.984375 42.40625 49.765625 
+C 42.40625 59.734375 34.296875 67.390625 23.90625 67.390625 
+C 18.40625 67.390625 13.203125 65.203125 9.5 61.21875 
+C 6.296875 57.8125 4.796875 54.625 3.09375 47.546875 
+L 5.203125 47.046875 
+C 9.203125 56.8125 12.796875 60 19.703125 60 
+C 28.09375 60 33.796875 54.3125 33.796875 45.9375 
+C 33.796875 38.171875 29.203125 28.90625 20.796875 20.03125 
+L 3 1.203125 
+L 3 0 
+L 42 0 
+z
+" id="Nimbus_Roman_No9_L_Regular-50"/>
+      </defs>
+      <g transform="translate(119.983722 211.234498)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#CMSY10-0"/>
+       <use transform="translate(156.909271 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-50"/>
+      </g>
+     </g>
+    </g>
+    <g id="xtick_4">
+     <g id="line2d_4">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="164.310789" xlink:href="#mddab006ef2" y="197.316"/>
+      </g>
+     </g>
+     <g id="text_4">
+      <!-- $10^{0}$ -->
+      <g transform="translate(157.486382 211.234498)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+      </g>
+     </g>
+    </g>
+    <g id="xtick_5">
+     <g id="line2d_5">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="198.949334" xlink:href="#mddab006ef2" y="197.316"/>
+      </g>
+     </g>
+     <g id="text_5">
+      <!-- $10^{2}$ -->
+      <g transform="translate(192.124927 211.234498)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-50"/>
+      </g>
+     </g>
+    </g>
+    <g id="text_6">
+     <!-- ${\rm Density}~n_{\rm H}~[{\rm cm^{-3}}]$ -->
+     <defs>
+      <path d="M 10.40625 10.609375 
+C 10.40625 3.390625 9.203125 2.09375 1.59375 2.09375 
+L 1.59375 0 
+L 30 0 
+C 41.59375 0 52.09375 3.28125 58.296875 8.875 
+C 64.796875 14.75 68.5 23.625 68.5 33.296875 
+C 68.5 42.28125 65.5 50.046875 60 55.53125 
+C 53.203125 62.40625 42.296875 66 28.59375 66 
+L 1.59375 66 
+L 1.59375 64.09375 
+C 9.5 63.390625 10.40625 62.5 10.40625 55.078125 
+z
+M 20.59375 58.40625 
+C 20.59375 61.5 21.703125 62.296875 25.796875 62.296875 
+C 34.40625 62.296875 40.90625 60.703125 45.703125 57.3125 
+C 53.5 51.921875 57.59375 43.359375 57.59375 32.609375 
+C 57.59375 20.84375 53.5 12.46875 45.40625 7.78125 
+C 40.296875 4.90625 34.5 3.703125 25.796875 3.703125 
+C 21.796875 3.703125 20.59375 4.59375 20.59375 7.78125 
+z
+" id="Nimbus_Roman_No9_L_Regular-68"/>
+      <path d="M 40.796875 16.453125 
+C 36 8.8125 31.703125 5.90625 25.296875 5.90625 
+C 19.59375 5.90625 15.296875 8.8125 12.40625 14.53125 
+C 10.59375 18.34375 9.90625 21.65625 9.703125 27.796875 
+L 40.5 27.796875 
+C 39.703125 34.265625 38.703125 37.140625 36.203125 40.328125 
+C 33.203125 43.921875 28.59375 46 23.40625 46 
+C 18.40625 46 13.703125 44.203125 9.90625 40.796875 
+C 5.203125 36.703125 2.5 29.59375 2.5 21.40625 
+C 2.5 7.59375 9.703125 -1 21.203125 -1 
+C 30.703125 -1 38.203125 4.921875 42.40625 15.75 
+z
+M 9.90625 31 
+C 11 38.734375 14.40625 42.40625 20.5 42.40625 
+C 26.59375 42.40625 29 39.609375 30.296875 31 
+z
+" id="Nimbus_Roman_No9_L_Regular-101"/>
+      <path d="M 1.59375 40.296875 
+C 2.203125 40.59375 3.203125 40.703125 4.296875 40.703125 
+C 7.09375 40.703125 8 39.203125 8 34.296875 
+L 8 9.5 
+C 8 3.796875 6.90625 2.40625 1.796875 2 
+L 1.796875 0.5 
+L 23 0.5 
+L 23 2 
+C 17.90625 2.40625 16.40625 3.59375 16.40625 7.203125 
+L 16.40625 35.296875 
+C 21.203125 39.796875 23.40625 41 26.703125 41 
+C 31.59375 41 34 37.90625 34 31.296875 
+L 34 10.40625 
+C 34 4.09375 32.703125 2.40625 27.703125 2 
+L 27.703125 0.5 
+L 48.5 0.5 
+L 48.5 2 
+C 43.59375 2.5 42.40625 3.703125 42.40625 8.59375 
+L 42.40625 31.5 
+C 42.40625 40.90625 38 46.5 30.59375 46.5 
+C 26 46.5 22.90625 44.796875 16.09375 38.40625 
+L 16.09375 46.296875 
+L 15.40625 46.5 
+C 10.5 44.703125 7.09375 43.59375 1.59375 42 
+z
+" id="Nimbus_Roman_No9_L_Regular-110"/>
+      <path d="M 31.5 31.40625 
+L 31.09375 45 
+L 30 45 
+L 29.796875 44.796875 
+C 28.90625 44.09375 28.796875 44 28.40625 44 
+C 27.796875 44 26.796875 44.203125 25.703125 44.703125 
+C 23.5 45.546875 21.296875 46 18.703125 46 
+C 10.796875 46 5.09375 40.875 5.09375 33.65625 
+C 5.09375 28.0625 8.296875 24.046875 16.796875 19.234375 
+L 22.59375 15.9375 
+C 26.09375 13.9375 27.796875 11.515625 27.796875 8.421875 
+C 27.796875 4 24.59375 1.203125 19.5 1.203125 
+C 16.09375 1.203125 13 2.484375 11.09375 4.65625 
+C 9 7.109375 8.09375 9.375 6.796875 15 
+L 5.203125 15 
+L 5.203125 -0.609375 
+L 6.5 -0.609375 
+C 7.203125 0.515625 7.59375 0.75 8.796875 0.75 
+C 9.703125 0.75 11.09375 0.546875 13.40625 -0.03125 
+C 16.203125 -0.609375 18.90625 -1 20.703125 -1 
+C 28.40625 -1 34.796875 4.8125 34.796875 11.828125 
+C 34.796875 16.828125 32.40625 20.140625 26.40625 23.75 
+L 15.59375 30.15625 
+C 12.796875 31.765625 11.296875 34.28125 11.296875 36.984375 
+C 11.296875 41 14.40625 43.796875 19 43.796875 
+C 24.703125 43.796875 27.703125 40.359375 30 31.40625 
+z
+" id="Nimbus_Roman_No9_L_Regular-115"/>
+      <path d="M 17.5 45.765625 
+L 2 40.296875 
+L 2 38.8125 
+L 2.796875 38.90625 
+C 4 39.09375 5.296875 39.203125 6.203125 39.203125 
+C 8.59375 39.203125 9.5 37.59375 9.5 33.203125 
+L 9.5 10 
+C 9.5 2.796875 8.5 1.703125 1.59375 1.703125 
+L 1.59375 0 
+L 25.296875 0 
+L 25.296875 1.5 
+C 18.703125 2 17.90625 2.984375 17.90625 10.15625 
+L 17.90625 45.46875 
+z
+M 12.796875 68 
+C 10.09375 68 7.796875 65.703125 7.796875 62.90625 
+C 7.796875 60.109375 10 57.796875 12.796875 57.796875 
+C 15.703125 57.796875 18 60 18 62.90625 
+C 18 65.703125 15.703125 68 12.796875 68 
+z
+" id="Nimbus_Roman_No9_L_Regular-105"/>
+      <path d="M 25.5 45 
+L 15.40625 45 
+L 15.40625 56.59375 
+C 15.40625 57.59375 15.296875 57.90625 14.703125 57.90625 
+C 14 57 13.40625 56.09375 12.703125 55.09375 
+C 8.90625 49.59375 4.59375 44.796875 3 44.390625 
+C 1.90625 43.65625 1.296875 42.9375 1.296875 42.421875 
+C 1.296875 42.109375 1.40625 41.90625 1.703125 41.90625 
+L 7 41.90625 
+L 7 11.734375 
+C 7 3.3125 10 -1 15.90625 -1 
+C 20.796875 -1 24.59375 1.40625 27.90625 6.609375 
+L 26.59375 7.703125 
+C 24.5 5.203125 22.796875 4.203125 20.59375 4.203125 
+C 16.90625 4.203125 15.40625 6.921875 15.40625 13.234375 
+L 15.40625 41.90625 
+L 25.5 41.90625 
+z
+" id="Nimbus_Roman_No9_L_Regular-116"/>
+      <path d="M 47.5 45 
+L 34 45 
+L 34 43.5 
+C 37.203125 43.5 38.796875 42.59375 38.796875 40.984375 
+C 38.796875 40.578125 38.703125 39.984375 38.40625 39.28125 
+L 28.703125 11.59375 
+L 17.203125 37.140625 
+C 16.59375 38.5625 16.203125 39.875 16.203125 40.96875 
+C 16.203125 42.796875 17.703125 43.5 22 43.5 
+L 22 45 
+L 1.40625 45 
+L 1.40625 43.59375 
+C 4 43.203125 5.703125 42.09375 6.5 40.390625 
+L 17.90625 15.703125 
+L 18.203125 14.890625 
+L 19.703125 11.890625 
+C 22.5 6.875 24.09375 3.265625 24.09375 1.75 
+C 24.09375 0.25 21.796875 -6.078125 20.09375 -9.09375 
+C 18.703125 -11.703125 16.5 -13.609375 15.09375 -13.609375 
+C 14.5 -13.609375 13.59375 -13.40625 12.59375 -12.90625 
+C 10.703125 -12.203125 9 -11.796875 7.296875 -11.796875 
+C 5 -11.796875 3 -13.796875 3 -16.203125 
+C 3 -19.5 6.203125 -22 10.40625 -22 
+C 17.09375 -22 21.90625 -16.390625 27.296875 -1.9375 
+L 42.703125 38.984375 
+C 44 42.203125 45.09375 43.203125 47.5 43.5 
+z
+" id="Nimbus_Roman_No9_L_Regular-121"/>
+      <path d="M 46 11.546875 
+L 43.90625 8.9375 
+C 41 5.234375 39.203125 3.625 37.796875 3.625 
+C 37 3.625 36.203125 4.421875 36.203125 5.21875 
+C 36.203125 5.9375 36.203125 5.9375 37.59375 11.53125 
+L 43.296875 32.1875 
+C 43.796875 34.28125 44.203125 36.484375 44.203125 37.890625 
+C 44.203125 41.5 41.5 44 37.59375 44 
+C 31.203125 44 24.90625 38 14.59375 21.96875 
+L 21.296875 43.796875 
+L 21 43.984375 
+C 15.59375 42.890625 13.5 42.5 4.796875 40.90625 
+L 4.796875 39.21875 
+C 9.90625 39.21875 11.203125 38.625 11.203125 36.609375 
+C 11.203125 36 11.09375 35.40625 11 34.90625 
+L 1.40625 -0.1875 
+L 8.90625 -0.1875 
+C 13.59375 15.65625 14.5 17.875 18.90625 24.6875 
+C 24.90625 33.890625 30 38.921875 33.703125 38.921875 
+C 35.203125 38.921875 36.09375 37.8125 36.09375 36 
+C 36.09375 34.8125 35.5 31.5 34.703125 28.390625 
+L 30.296875 11.84375 
+C 29 6.734375 28.703125 5.328125 28.703125 4.328125 
+C 28.703125 0.515625 30.09375 -1.078125 33.40625 -1.078125 
+C 37.90625 -1.078125 40.5 1.03125 47.40625 10.25 
+z
+" id="Nimbus_Roman_No9_L_Regular_Italic-110"/>
+      <path d="M 20.90625 36 
+L 20.90625 55.203125 
+C 20.90625 62.34375 22 63.421875 29.703125 64.125 
+L 29.703125 66 
+L 1.90625 66 
+L 1.90625 64.125 
+C 9.59375 63.421875 10.703125 62.328125 10.703125 55.109375 
+L 10.703125 11.703125 
+C 10.703125 3.28125 9.703125 2.09375 1.90625 2.09375 
+L 1.90625 0 
+L 29.703125 0 
+L 29.703125 1.890625 
+C 22.203125 2.484375 20.90625 3.78125 20.90625 10.921875 
+L 20.90625 31.59375 
+L 51.203125 31.59375 
+L 51.203125 11.828125 
+C 51.203125 3.296875 50.203125 2.09375 42.40625 2.09375 
+L 42.40625 0 
+L 70.203125 0 
+L 70.203125 1.890625 
+C 62.703125 2.484375 61.40625 3.78125 61.40625 10.875 
+L 61.40625 55.140625 
+C 61.40625 62.328125 62.5 63.421875 70.203125 64.125 
+L 70.203125 66 
+L 42.40625 66 
+L 42.40625 64.125 
+C 50.09375 63.421875 51.203125 62.34375 51.203125 55.203125 
+L 51.203125 36 
+z
+" id="Nimbus_Roman_No9_L_Regular-72"/>
+      <path d="M 25.5 -25 
+L 25.5 -20.984375 
+L 15.796875 -20.984375 
+L 15.796875 71 
+L 25.5 71 
+L 25.5 75.015625 
+L 11.796875 75.015625 
+L 11.796875 -25 
+z
+" id="CMR10-91"/>
+      <path d="M 39.796875 15.59375 
+C 35 8.59375 31.40625 6.203125 25.703125 6.203125 
+C 16.59375 6.203125 10.203125 14.203125 10.203125 25.703125 
+C 10.203125 36 15.703125 43.09375 23.796875 43.09375 
+C 27.40625 43.09375 28.703125 42 29.703125 38.296875 
+L 30.296875 36.09375 
+C 31.09375 33.296875 32.90625 31.5 35 31.5 
+C 37.59375 31.5 39.796875 33.40625 39.796875 35.703125 
+C 39.796875 41.296875 32.796875 46 24.40625 46 
+C 19.5 46 14.40625 44 10.296875 40.40625 
+C 5.296875 36 2.5 29.203125 2.5 21.296875 
+C 2.5 8.296875 10.40625 -1 21.5 -1 
+C 26 -1 30 0.59375 33.59375 3.703125 
+C 36.296875 6.09375 38.203125 8.796875 41.203125 14.703125 
+z
+" id="Nimbus_Roman_No9_L_Regular-99"/>
+      <path d="M 1.90625 39.796875 
+C 3.203125 40.09375 4 40.203125 5.09375 40.203125 
+C 7.703125 40.203125 8.59375 38.59375 8.59375 33.78125 
+L 8.59375 8.421875 
+C 8.59375 3 7.203125 1.5 1.59375 1.5 
+L 1.59375 0 
+L 23.796875 0 
+L 23.796875 1.5 
+C 18.5 1.5 17 2.59375 17 6.53125 
+L 17 34.90625 
+C 17 35.09375 17.796875 36.09375 18.5 36.796875 
+C 21 39.09375 25.296875 40.796875 28.796875 40.796875 
+C 33.203125 40.796875 35.40625 37.265625 35.40625 30.1875 
+L 35.40625 8.25 
+C 35.40625 2.609375 34.296875 1.5 28.59375 1.5 
+L 28.59375 0 
+L 51 0 
+L 51 1.5 
+C 45.296875 1.5 43.796875 3.203125 43.796875 9.421875 
+L 43.796875 34.703125 
+C 46.796875 39 50.09375 40.796875 54.703125 40.796875 
+C 60.40625 40.796875 62.203125 38.09375 62.203125 29.796875 
+L 62.203125 8.703125 
+C 62.203125 3 61.40625 2.203125 55.59375 1.5 
+L 55.59375 0 
+L 77.5 0 
+L 77.5 1.5 
+L 74.90625 1.796875 
+C 71.90625 1.796875 70.59375 3.59375 70.59375 7.5 
+L 70.59375 28.15625 
+C 70.59375 39.984375 66.703125 46 59 46 
+C 53.203125 46 48.09375 43.40625 42.703125 37.609375 
+C 40.90625 43.296875 37.5 46 32.09375 46 
+C 27.703125 46 24.90625 44.59375 16.59375 38.296875 
+L 16.59375 45.796875 
+L 15.90625 46 
+C 10.796875 44.09375 7.40625 43 1.90625 41.5 
+z
+" id="Nimbus_Roman_No9_L_Regular-109"/>
+      <path d="M 15.296875 33.40625 
+C 21.203125 33.40625 23.5 33.203125 25.90625 32.3125 
+C 32.09375 30.109375 36 24.40625 36 17.5 
+C 36 9.109375 30.296875 2.609375 22.90625 2.609375 
+C 20.203125 2.609375 18.203125 3.3125 14.5 5.703125 
+C 11.5 7.5 9.796875 8.203125 8.09375 8.203125 
+C 5.796875 8.203125 4.296875 6.8125 4.296875 4.703125 
+C 4.296875 1.203125 8.59375 -1 15.59375 -1 
+C 23.296875 -1 31.203125 1.609375 35.90625 5.703125 
+C 40.59375 9.8125 43.203125 15.609375 43.203125 22.3125 
+C 43.203125 27.40625 41.59375 32.109375 38.703125 35.203125 
+C 36.703125 37.40625 34.796875 38.609375 30.40625 40.5 
+C 37.296875 45.203125 39.796875 48.90625 39.796875 54.3125 
+C 39.796875 62.40625 33.40625 68 24.203125 68 
+C 19.203125 68 14.796875 66.3125 11.203125 63.109375 
+C 8.203125 60.40625 6.703125 57.796875 4.5 51.796875 
+L 6 51.40625 
+C 10.09375 58.703125 14.59375 62 20.90625 62 
+C 27.40625 62 31.90625 57.609375 31.90625 51.3125 
+C 31.90625 47.703125 30.40625 44.109375 27.90625 41.609375 
+C 24.90625 38.609375 22.09375 37.109375 15.296875 34.703125 
+z
+" id="Nimbus_Roman_No9_L_Regular-51"/>
+      <path d="M 15.90625 75.015625 
+L 2.203125 75.015625 
+L 2.203125 71 
+L 11.90625 71 
+L 11.90625 -20.984375 
+L 2.203125 -20.984375 
+L 2.203125 -25 
+L 15.90625 -25 
+z
+" id="CMR10-93"/>
+     </defs>
+     <g transform="translate(91.250474 222.339856)scale(0.1 -0.1)">
+      <use transform="translate(0 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-68"/>
+      <use transform="translate(71.929985 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-101"/>
+      <use transform="translate(116.163554 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-110"/>
+      <use transform="translate(165.976754 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-115"/>
+      <use transform="translate(204.73046 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-105"/>
+      <use transform="translate(232.42563 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-116"/>
+      <use transform="translate(260.120799 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-121"/>
+      <use transform="translate(341.163113 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular_Italic-110"/>
+      <use transform="translate(390.976313 0)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-72"/>
+      <use transform="translate(475.932143 15.050129)scale(0.996264)" xlink:href="#CMR10-91"/>
+      <use transform="translate(503.52765 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-99"/>
+      <use transform="translate(547.761218 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-109"/>
+      <use transform="translate(625.269588 51.213361)scale(0.737241)" xlink:href="#CMSY10-0"/>
+      <use transform="translate(682.552458 51.213361)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-51"/>
+      <use transform="translate(719.912632 15.050129)scale(0.996264)" xlink:href="#CMR10-93"/>
+     </g>
+    </g>
+   </g>
+   <g id="matplotlib.axis_2">
+    <g id="ytick_1">
+     <g id="line2d_6">
+      <defs>
+       <path d="M 0 0 
+L -3.5 0 
+" id="mec8c027021" style="stroke:#000000;stroke-width:0.8;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="34.02" xlink:href="#mec8c027021" y="163.232825"/>
+      </g>
+     </g>
+     <g id="text_7">
+      <!-- $10^{2}$ -->
+      <g transform="translate(13.371186 166.692074)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-50"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_2">
+     <g id="line2d_7">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="34.02" xlink:href="#mec8c027021" y="114.470825"/>
+      </g>
+     </g>
+     <g id="text_8">
+      <!-- $10^{3}$ -->
+      <g transform="translate(13.371186 117.930074)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-51"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_3">
+     <g id="line2d_8">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="34.02" xlink:href="#mec8c027021" y="65.708825"/>
+      </g>
+     </g>
+     <g id="text_9">
+      <!-- $10^{4}$ -->
+      <g transform="translate(13.371186 69.168074)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-52"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_4">
+     <g id="line2d_9">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.8;" x="34.02" xlink:href="#mec8c027021" y="16.946825"/>
+      </g>
+     </g>
+     <g id="text_10">
+      <!-- $10^{5}$ -->
+      <defs>
+       <path d="M 18.09375 59 
+L 37.703125 59 
+C 39.296875 59 39.703125 59.203125 40 59.90625 
+L 43.796875 68.796875 
+L 42.90625 69.5 
+C 41.40625 67.40625 40.40625 66.90625 38.296875 66.90625 
+L 17.40625 66.90625 
+L 6.5 43.109375 
+C 6.40625 42.90625 6.40625 42.8125 6.40625 42.609375 
+C 6.40625 42.109375 6.796875 41.8125 7.59375 41.8125 
+C 10.796875 41.8125 14.796875 41.109375 18.90625 39.796875 
+C 30.40625 36.078125 35.703125 29.84375 35.703125 19.90625 
+C 35.703125 10.25 29.59375 2.703125 21.796875 2.703125 
+C 19.796875 2.703125 18.09375 3.40625 15.09375 5.625 
+C 11.90625 7.9375 9.59375 8.9375 7.5 8.9375 
+C 4.59375 8.9375 3.203125 7.734375 3.203125 5.21875 
+C 3.203125 1.40625 7.90625 -1 15.40625 -1 
+C 23.796875 -1 31 1.71875 36 6.84375 
+C 40.59375 11.375 42.703125 17.09375 42.703125 24.734375 
+C 42.703125 31.96875 40.796875 36.59375 35.796875 41.609375 
+C 31.40625 46.046875 25.703125 48.34375 13.90625 50.453125 
+z
+" id="Nimbus_Roman_No9_L_Regular-53"/>
+      </defs>
+      <g transform="translate(13.371186 20.406074)scale(0.1 -0.1)">
+       <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-49"/>
+       <use transform="translate(49.8132 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-48"/>
+       <use transform="translate(99.626401 36.163231)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-53"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_5">
+     <g id="line2d_10">
+      <defs>
+       <path d="M 0 0 
+L -2 0 
+" id="mc0ad7d39b4" style="stroke:#000000;stroke-width:0.6;"/>
+      </defs>
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="197.316"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_6">
+     <g id="line2d_11">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="188.729438"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_7">
+     <g id="line2d_12">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="182.637175"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_8">
+     <g id="line2d_13">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="177.911649"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_9">
+     <g id="line2d_14">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="174.050613"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_10">
+     <g id="line2d_15">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="170.786154"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_11">
+     <g id="line2d_16">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="167.958351"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_12">
+     <g id="line2d_17">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="165.464051"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_13">
+     <g id="line2d_18">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="148.554"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_14">
+     <g id="line2d_19">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="139.967438"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_15">
+     <g id="line2d_20">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="133.875175"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_16">
+     <g id="line2d_21">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="129.149649"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_17">
+     <g id="line2d_22">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="125.288613"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_18">
+     <g id="line2d_23">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="122.024154"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_19">
+     <g id="line2d_24">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="119.196351"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_20">
+     <g id="line2d_25">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="116.702051"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_21">
+     <g id="line2d_26">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="99.792"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_22">
+     <g id="line2d_27">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="91.205438"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_23">
+     <g id="line2d_28">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="85.113175"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_24">
+     <g id="line2d_29">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="80.387649"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_25">
+     <g id="line2d_30">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="76.526613"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_26">
+     <g id="line2d_31">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="73.262154"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_27">
+     <g id="line2d_32">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="70.434351"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_28">
+     <g id="line2d_33">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="67.940051"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_29">
+     <g id="line2d_34">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="51.03"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_30">
+     <g id="line2d_35">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="42.443438"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_31">
+     <g id="line2d_36">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="36.351175"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_32">
+     <g id="line2d_37">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="31.625649"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_33">
+     <g id="line2d_38">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="27.764613"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_34">
+     <g id="line2d_39">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="24.500154"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_35">
+     <g id="line2d_40">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="21.672351"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_36">
+     <g id="line2d_41">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="19.178051"/>
+      </g>
+     </g>
+    </g>
+    <g id="ytick_37">
+     <g id="line2d_42">
+      <g>
+       <use style="stroke:#000000;stroke-width:0.6;" x="34.02" xlink:href="#mc0ad7d39b4" y="2.268"/>
+      </g>
+     </g>
+    </g>
+    <g id="text_11">
+     <!-- ${\rm Temperature}~T~[{\rm K}]$ -->
+     <defs>
+      <path d="M 25.40625 62 
+L 25.40625 11.734375 
+C 25.40625 3.1875 24.296875 2.09375 16 2.09375 
+L 16 0 
+L 45.203125 0 
+L 45.203125 2.09375 
+C 37 2.09375 35.59375 3.390625 35.59375 10.734375 
+L 35.59375 62 
+L 41 62 
+C 52.40625 62 54.59375 60.203125 56.90625 49.203125 
+L 59.296875 49.203125 
+L 58.703125 66.203125 
+L 2.296875 66.203125 
+L 1.703125 49.203125 
+L 4.09375 49.203125 
+C 6.5 60.09375 8.796875 62 20 62 
+z
+" id="Nimbus_Roman_No9_L_Regular-84"/>
+      <path d="M 0.90625 39.3125 
+C 1.796875 39.40625 2.5 39.40625 3.40625 39.40625 
+C 6.796875 39.40625 7.5 38.40625 7.5 33.671875 
+L 7.5 -13.359375 
+C 7.5 -18.578125 6.40625 -19.703125 0.5 -20.296875 
+L 0.5 -22 
+L 24.703125 -22 
+L 24.703125 -20.203125 
+C 17.203125 -20.203125 15.90625 -19.09375 15.90625 -12.75 
+L 15.90625 3.109375 
+C 19.40625 -0.203125 21.796875 -1.203125 26 -1.203125 
+C 37.796875 -1.203125 47 10.046875 47 24.609375 
+C 47 37.0625 40 46 30.296875 46 
+C 24.703125 46 20.296875 43.5 15.90625 38.09375 
+L 15.90625 45.796875 
+L 15.296875 46 
+C 9.90625 43.90625 6.40625 42.59375 0.90625 40.90625 
+z
+M 15.90625 33.375 
+C 15.90625 36.390625 21.5 40 26.09375 40 
+C 33.5 40 38.40625 32.359375 38.40625 20.6875 
+C 38.40625 9.546875 33.5 2 26.296875 2 
+C 21.59375 2 15.90625 5.609375 15.90625 8.625 
+z
+" id="Nimbus_Roman_No9_L_Regular-112"/>
+      <path d="M 0.703125 39 
+C 2.09375 39.296875 3 39.40625 4.203125 39.40625 
+C 6.703125 39.40625 7.59375 37.796875 7.59375 33.40625 
+L 7.59375 8.40625 
+C 7.59375 3.40625 6.90625 2.703125 0.5 1.5 
+L 0.5 0 
+L 24.5 0 
+L 24.5 1.59375 
+C 17.703125 1.59375 16 3.109375 16 8.828125 
+L 16 31.453125 
+C 16 34.671875 20.296875 39.703125 23 39.703125 
+C 23.59375 39.703125 24.5 39.203125 25.59375 38.203125 
+C 27.203125 36.796875 28.296875 36.203125 29.59375 36.203125 
+C 32 36.203125 33.5 37.90625 33.5 40.703125 
+C 33.5 44 31.40625 46 28 46 
+C 23.796875 46 20.90625 43.703125 16 36.609375 
+L 16 45.796875 
+L 15.5 46 
+C 10.203125 43.796875 6.59375 42.515625 0.703125 40.609375 
+z
+" id="Nimbus_Roman_No9_L_Regular-114"/>
+      <path d="M 44.203125 6.609375 
+C 42.5 5.203125 41.296875 4.703125 39.796875 4.703125 
+C 37.5 4.703125 36.796875 6.09375 36.796875 10.5 
+L 36.796875 30 
+C 36.796875 35.203125 36.296875 38.09375 34.796875 40.5 
+C 32.59375 44.09375 28.296875 46 22.40625 46 
+C 13 46 5.59375 41.09375 5.59375 34.796875 
+C 5.59375 32.5 7.59375 30.5 9.90625 30.5 
+C 12.296875 30.5 14.40625 32.5 14.40625 34.703125 
+C 14.40625 35.09375 14.296875 35.59375 14.203125 36.296875 
+C 14 37.203125 13.90625 38 13.90625 38.703125 
+C 13.90625 41.40625 17.09375 43.59375 21.09375 43.59375 
+C 26 43.59375 28.703125 40.703125 28.703125 35.296875 
+L 28.703125 29.203125 
+C 13.296875 23 11.59375 22.203125 7.296875 18.40625 
+C 5.09375 16.40625 3.703125 13 3.703125 9.703125 
+C 3.703125 3.40625 8.09375 -1 14.203125 -1 
+C 18.59375 -1 22.703125 1.09375 28.796875 6.296875 
+C 29.296875 1.09375 31.09375 -1 35.203125 -1 
+C 38.59375 -1 40.703125 0.203125 44.203125 4 
+z
+M 28.703125 12.234375 
+C 28.703125 9.125 28.203125 8.21875 26.09375 7.015625 
+C 23.703125 5.609375 20.90625 4.703125 18.796875 4.703125 
+C 15.296875 4.703125 12.5 8.125 12.5 12.4375 
+L 12.5 12.84375 
+C 12.5 18.765625 16.59375 22.390625 28.703125 26.796875 
+z
+" id="Nimbus_Roman_No9_L_Regular-97"/>
+      <path d="M 47.90625 5 
+L 47.40625 5 
+C 42.796875 5 41.703125 6.09375 41.703125 10.703125 
+L 41.703125 45 
+L 25.90625 45 
+L 25.90625 43.09375 
+C 32.09375 43.09375 33.296875 42.09375 33.296875 37.109375 
+L 33.296875 13.671875 
+C 33.296875 10.890625 32.796875 9.484375 31.40625 8.390625 
+C 28.703125 6.203125 25.59375 5 22.59375 5 
+C 18.703125 5 15.5 8.390625 15.5 12.578125 
+L 15.5 45 
+L 0.90625 45 
+L 0.90625 43.40625 
+C 5.703125 43.40625 7.09375 41.90625 7.09375 37.296875 
+L 7.09375 12.03125 
+C 7.09375 4.109375 11.90625 -1 19.203125 -1 
+C 22.90625 -1 26.796875 0.59375 29.5 3.296875 
+L 33.796875 7.59375 
+L 33.796875 -0.703125 
+L 34.203125 -0.90625 
+C 39.203125 1.09375 42.796875 2.203125 47.90625 3.59375 
+z
+" id="Nimbus_Roman_No9_L_Regular-117"/>
+      <path d="M 63.296875 65 
+L 10.09375 65 
+L 5.90625 49.671875 
+L 7.703125 49.265625 
+C 13 59.921875 16.203125 61.609375 31.5 61.609375 
+L 17.09375 8.984375 
+C 15.5 3.6875 13.09375 2.09375 6.5 1.59375 
+L 6.5 0 
+L 35.5 0 
+L 35.5 1.59375 
+C 33.796875 1.59375 32.296875 2 31.703125 2 
+C 27.703125 2 26.5 2.890625 26.5 5.90625 
+C 26.5 7.203125 26.796875 8.390625 27.703125 11.78125 
+L 41.59375 61.609375 
+L 47.09375 61.609375 
+C 54.296875 61.609375 57.5 59.09375 57.5 53.46875 
+C 57.5 52.171875 57.40625 50.6875 57.203125 48.96875 
+L 58.90625 48.765625 
+z
+" id="Nimbus_Roman_No9_L_Regular_Italic-84"/>
+      <path d="M 41.296875 64.203125 
+C 42.5 64 43.5 64 43.90625 64 
+C 46.90625 64 48.09375 63.203125 48.09375 61.203125 
+C 48.09375 58.984375 45.796875 55.984375 40.296875 50.890625 
+L 22.59375 34.75 
+L 22.59375 55.21875 
+C 22.59375 62.5 23.703125 63.609375 31.796875 64.203125 
+L 31.796875 66.109375 
+L 3.40625 66.109375 
+L 3.40625 64.203125 
+C 11.203125 63.609375 12.40625 62.3125 12.40625 55.171875 
+L 12.40625 11.71875 
+C 12.40625 3.390625 11.296875 2.09375 3.40625 2.09375 
+L 3.40625 0 
+L 31.59375 0 
+L 31.59375 2.09375 
+C 23.796875 2.09375 22.59375 3.296875 22.59375 10.671875 
+L 22.59375 29.546875 
+L 25.203125 31.65625 
+L 35.796875 21.171875 
+C 43.40625 13.6875 48.796875 6.59375 48.796875 4.203125 
+C 48.796875 2.796875 47.40625 2.09375 44.59375 2.09375 
+C 44.09375 2.09375 43 2.09375 41.796875 1.890625 
+L 41.796875 0 
+L 72.296875 0 
+L 72.296875 2.09375 
+C 67.09375 2.09375 65.703125 3.09375 56.59375 12.75 
+L 33.296875 37.640625 
+L 52.296875 56.421875 
+C 59.09375 62.90625 60.703125 63.703125 67.5 64.203125 
+L 67.5 66.109375 
+L 41.296875 66.109375 
+z
+" id="Nimbus_Roman_No9_L_Regular-75"/>
+     </defs>
+     <g transform="translate(8.880526 137.975632)rotate(-90)scale(0.1 -0.1)">
+      <use transform="scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-84"/>
+      <use transform="translate(53.898284 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-101"/>
+      <use transform="translate(98.131853 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-109"/>
+      <use transform="translate(175.640223 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-112"/>
+      <use transform="translate(225.453424 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-101"/>
+      <use transform="translate(269.686992 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-114"/>
+      <use transform="translate(302.862222 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-97"/>
+      <use transform="translate(347.095791 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-116"/>
+      <use transform="translate(374.79096 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-117"/>
+      <use transform="translate(424.604161 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-114"/>
+      <use transform="translate(457.779391 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-101"/>
+      <use transform="translate(533.242072 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular_Italic-84"/>
+      <use transform="translate(632.514439 0)scale(0.996264)" xlink:href="#CMR10-91"/>
+      <use transform="translate(660.109945 0)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular-75"/>
+      <use transform="translate(732.03993 0)scale(0.996264)" xlink:href="#CMR10-93"/>
+     </g>
+    </g>
+   </g>
+   <g id="line2d_43">
+    <path clip-path="url(#p107a2e5a22)" d="M 77.714425 70.434351 
+L 227.8 70.434351 
+L 227.8 70.434351 
+" style="fill:none;stroke:#000000;stroke-linecap:square;"/>
+   </g>
+   <g id="line2d_44">
+    <path clip-path="url(#p107a2e5a22)" d="M 146.991516 85.113175 
+L 227.8 9.275054 
+L 227.8 9.275054 
+" style="fill:none;stroke:#000000;stroke-linecap:square;"/>
+   </g>
+   <g id="line2d_45">
+    <path clip-path="url(#p107a2e5a22)" d="M -1 70.434351 
+L 77.714425 70.434351 
+" style="fill:none;stroke:#000000;stroke-dasharray:0.6,0.99;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="line2d_46">
+    <path clip-path="url(#p107a2e5a22)" d="M -1 85.113175 
+L 146.991516 85.113175 
+" style="fill:none;stroke:#000000;stroke-dasharray:0.6,0.99;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="line2d_47">
+    <path clip-path="url(#p107a2e5a22)" d="M 77.714425 197.316 
+L 77.714425 70.434351 
+" style="fill:none;stroke:#000000;stroke-dasharray:0.6,0.99;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="line2d_48">
+    <path clip-path="url(#p107a2e5a22)" d="M 146.991516 197.316 
+L 146.991516 85.113175 
+" style="fill:none;stroke:#000000;stroke-dasharray:0.6,0.99;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="line2d_49">
+    <path clip-path="url(#p107a2e5a22)" d="M 68.658545 43.904505 
+L 103.297091 43.904505 
+" style="fill:none;stroke:#000000;stroke-dasharray:2.22,0.96;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="line2d_50">
+    <path clip-path="url(#p107a2e5a22)" d="M 155.254909 57.122263 
+L 189.893455 24.614263 
+" style="fill:none;stroke:#000000;stroke-dasharray:2.22,0.96;stroke-dashoffset:0;stroke-width:0.6;"/>
+   </g>
+   <g id="patch_3">
+    <path d="M 34.02 197.316 
+L 34.02 2.268 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_4">
+    <path d="M 224.532 197.316 
+L 224.532 2.268 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_5">
+    <path d="M 34.02 197.316 
+L 224.532 197.316 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="patch_6">
+    <path d="M 34.02 2.268 
+L 224.532 2.268 
+" style="fill:none;stroke:#000000;stroke-linecap:square;stroke-linejoin:miter;stroke-width:0.8;"/>
+   </g>
+   <g id="text_12">
+    <!-- $n_{\rm H}~\widehat{}~{\tt Cool\_gamma\_effective}$ -->
+    <defs>
+     <path d="M 27.703125 68.5 
+L 54.90625 56.203125 
+L 56.09375 58.40625 
+L 27.796875 74.40625 
+L -0.5 58.40625 
+L 0.59375 56.203125 
+z
+" id="CMEX10-98"/>
+     <path d="M 47.40625 58.296875 
+C 47.40625 59.890625 47.40625 62.09375 44.59375 62.09375 
+C 42.796875 62.09375 42.296875 61 41.90625 60.296875 
+C 41.59375 59.5 40.796875 57.703125 40.5 57 
+C 36.40625 60.59375 31.796875 62.09375 27.59375 62.09375 
+C 14.703125 62.09375 4 48.390625 4 30.59375 
+C 4 12.40625 15 -1 27.59375 -1 
+C 39.296875 -1 47.40625 8.21875 47.40625 16.96875 
+C 47.40625 20.03125 45.296875 20.03125 44.09375 20.03125 
+C 42.703125 20.03125 41.09375 19.53125 40.90625 17.859375 
+C 40.09375 5.109375 30 5.109375 28.296875 5.109375 
+C 19 5.109375 10.5 15.859375 10.5 30.75 
+C 10.5 45.65625 19.09375 56.296875 28.203125 56.296875 
+C 33.59375 56.296875 39.5 52 40.90625 42.796875 
+C 41.203125 40.59375 42.09375 40 44.09375 40 
+C 47.40625 40 47.40625 41.796875 47.40625 43.796875 
+z
+" id="CMTT12-67"/>
+     <path d="M 45.59375 21.640625 
+C 45.59375 34.109375 36.5 44 25.703125 44 
+C 14.90625 44 5.796875 34.109375 5.796875 21.640625 
+C 5.796875 9.078125 15 -0.5 25.703125 -0.5 
+C 36.40625 -0.5 45.59375 9.078125 45.59375 21.640625 
+z
+M 25.703125 5.296875 
+C 18.5 5.296875 12.296875 12.875 12.296875 22.34375 
+C 12.296875 31.609375 18.703125 38.203125 25.703125 38.203125 
+C 32.703125 38.203125 39.09375 31.609375 39.09375 22.34375 
+C 39.09375 12.78125 32.90625 5.296875 25.703125 5.296875 
+z
+" id="CMTT12-111"/>
+     <path d="M 29 56.90625 
+C 29 60 28.40625 61 25 61 
+L 10.296875 61 
+C 8.90625 61 6.296875 61 6.296875 58.109375 
+C 6.296875 55.203125 8.90625 55.203125 10.296875 55.203125 
+L 22.5 55.203125 
+L 22.5 5.796875 
+L 10.296875 5.796875 
+C 8.90625 5.796875 6.296875 5.796875 6.296875 2.90625 
+C 6.296875 0 8.90625 0 10.296875 0 
+L 41.203125 0 
+C 42.59375 0 45.203125 0 45.203125 2.90625 
+C 45.203125 5.796875 42.59375 5.796875 41.203125 5.796875 
+L 29 5.796875 
+z
+" id="CMTT12-108"/>
+     <path d="M 19 6.59375 
+C 17.796875 6.59375 13.203125 6.59375 13.203125 12.015625 
+C 13.203125 13.921875 13.5 14.3125 13.90625 15.109375 
+C 15.90625 13.703125 19.203125 12.40625 22.796875 12.40625 
+C 31.90625 12.40625 38.90625 19.703125 38.90625 28.28125 
+C 38.90625 30.78125 38.296875 33.890625 36.203125 37.1875 
+C 38.796875 38.421875 41.203125 38.703125 42.703125 38.703125 
+C 43.296875 36 46.203125 36 46.296875 36 
+C 47.5 36 50 36.796875 50 39.796875 
+C 50 41.703125 48.5 45 42.90625 45 
+C 41.09375 45 36.59375 44.5625 32.40625 40.90625 
+C 29.09375 43.203125 25.703125 44 22.90625 44 
+C 13.796875 44 6.796875 36.765625 6.796875 28.21875 
+C 6.796875 26.03125 7.296875 22.265625 10.09375 18.703125 
+C 8.203125 15.90625 7.90625 12.90625 7.90625 11.703125 
+C 7.90625 8.203125 9.59375 5.40625 10.40625 4.5 
+C 4.90625 1.484375 2.90625 -3.9375 2.90625 -7.84375 
+C 2.90625 -16.171875 13 -22.5 25.703125 -22.5 
+C 38.40625 -22.5 48.5 -16.28125 48.5 -7.84375 
+C 48.5 6.59375 30.796875 6.59375 27.5 6.59375 
+z
+M 22.796875 18 
+C 17.796875 18 13.296875 22.359375 13.296875 28.203125 
+C 13.296875 34.046875 17.90625 38.390625 22.796875 38.390625 
+C 28.09375 38.390625 32.40625 33.84375 32.40625 28.203125 
+C 32.40625 22.546875 28.09375 18 22.796875 18 
+z
+M 25.703125 -16.890625 
+C 15.90625 -16.890625 8.59375 -12.484375 8.59375 -7.84375 
+C 8.59375 -6.328125 9.09375 -2.8125 12.796875 -0.515625 
+C 15.09375 1 16.09375 1 23 1 
+C 31.796875 1 42.796875 1 42.796875 -7.84375 
+C 42.796875 -12.484375 35.5 -16.890625 25.703125 -16.890625 
+z
+" id="CMTT12-103"/>
+     <path d="M 41.40625 28.375 
+C 41.40625 35.984375 35.703125 44 22.203125 44 
+C 18 44 8.296875 44 8.296875 37.1875 
+C 8.296875 34.53125 10.203125 33.046875 12.40625 33.046875 
+C 13 33.046875 16.296875 33.046875 16.40625 37.3125 
+C 16.40625 37.796875 16.5 37.90625 18.59375 38.09375 
+C 19.796875 38.203125 21.09375 38.203125 22.296875 38.203125 
+C 24.59375 38.203125 28 38.203125 31.296875 35.65625 
+C 34.90625 32.8125 34.90625 29.9375 34.90625 27 
+C 29 27 23.203125 27 17 24.984375 
+C 12 23.265625 5.59375 19.625 5.59375 12.75 
+C 5.59375 5.5625 11.90625 -0.5 21.203125 -0.5 
+C 24.40625 -0.5 30.59375 0 35.796875 3.53125 
+C 37.796875 0 42.796875 0 46.59375 0 
+C 49 0 51.40625 0 51.40625 2.859375 
+C 51.40625 5.703125 48.796875 5.703125 47.40625 5.703125 
+C 44.796875 5.703125 42.796875 5.90625 41.40625 6.515625 
+z
+M 34.90625 13.296875 
+C 34.90625 11 34.90625 8.890625 30.796875 7 
+C 27.296875 5.296875 22.59375 5.296875 22.296875 5.296875 
+C 16.40625 5.296875 12.09375 8.5 12.09375 12.59375 
+C 12.09375 18.5 22.796875 21.890625 34.90625 21.890625 
+z
+" id="CMTT12-97"/>
+     <path d="M 45.296875 30.359375 
+C 45.296875 32.5625 45.296875 43.796875 36.5 43.796875 
+C 33 43.796875 29.59375 42.1875 26.90625 38.484375 
+C 26.296875 39.984375 24.09375 43.796875 19.296875 43.796875 
+C 14.796875 43.796875 11.703125 40.828125 10.90625 39.921875 
+C 10.796875 43 8.703125 43 6.90625 43 
+L 3.90625 43 
+C 2.5 43 -0.09375 43 -0.09375 40.109375 
+C -0.09375 37.203125 2.203125 37.203125 5.296875 37.203125 
+L 5.296875 5.796875 
+C 2.09375 5.796875 -0.09375 5.796875 -0.09375 2.90625 
+C -0.09375 0 2.5 0 3.90625 0 
+L 12.296875 0 
+C 13.703125 0 16.296875 0 16.296875 2.90625 
+C 16.296875 5.796875 14 5.796875 10.90625 5.796875 
+L 10.90625 24.25 
+C 10.90625 32.671875 14.5 38 18.90625 38 
+C 21.703125 38 22.5 34.484375 22.5 29.765625 
+L 22.5 5.796875 
+C 20.796875 5.796875 18.09375 5.796875 18.09375 2.90625 
+C 18.09375 0 20.796875 0 22.203125 0 
+L 29.5 0 
+C 30.90625 0 33.5 0 33.5 2.90625 
+C 33.5 5.796875 31.203125 5.796875 28.09375 5.796875 
+L 28.09375 24.25 
+C 28.09375 32.671875 31.703125 38 36.09375 38 
+C 38.90625 38 39.703125 34.484375 39.703125 29.765625 
+L 39.703125 5.796875 
+C 38 5.796875 35.296875 5.796875 35.296875 2.90625 
+C 35.296875 0 38 0 39.40625 0 
+L 46.703125 0 
+C 48.09375 0 50.703125 0 50.703125 2.90625 
+C 50.703125 5.796875 48.40625 5.796875 45.296875 5.796875 
+z
+" id="CMTT12-109"/>
+     <path d="M 41.59375 19.5 
+C 43.703125 19.5 45.59375 19.5 45.59375 23.15625 
+C 45.59375 34.703125 39 44 26.5 44 
+C 14.90625 44 5.59375 34.015625 5.59375 21.84375 
+C 5.59375 9.28125 15.703125 -0.5 28 -0.5 
+C 40.90625 -0.5 45.59375 8.453125 45.59375 11.078125 
+C 45.59375 11.6875 45.40625 13.921875 42.296875 13.921875 
+C 40.40625 13.921875 39.796875 13.203125 39.203125 11.6875 
+C 36.703125 5.796875 30.203125 5.296875 28.296875 5.296875 
+C 20 5.296875 13.40625 11.6875 12.296875 19.5 
+z
+M 12.40625 25 
+C 13.703125 33.3125 20.203125 38.203125 26.5 38.203125 
+C 36.5 38.203125 38.59375 29.890625 39 25 
+z
+" id="CMTT12-101"/>
+     <path d="M 24.703125 37.203125 
+L 36.796875 37.203125 
+C 38.203125 37.203125 40.796875 37.203125 40.796875 40.109375 
+C 40.796875 43 38.203125 43 36.796875 43 
+L 24.703125 43 
+L 24.703125 48.078125 
+C 24.703125 56 31.703125 56 34.90625 56 
+C 34.90625 55.796875 35.703125 52.03125 39 52.03125 
+C 40.59375 52.03125 42.90625 53.234375 42.90625 56.0625 
+C 42.90625 61.796875 35.296875 61.796875 33.796875 61.796875 
+C 26.203125 61.796875 18.203125 57.453125 18.203125 48.453125 
+L 18.203125 43 
+L 8.296875 43 
+C 6.90625 43 4.203125 43 4.203125 40.109375 
+C 4.203125 37.203125 6.796875 37.203125 8.203125 37.203125 
+L 18.203125 37.203125 
+L 18.203125 5.796875 
+L 8.703125 5.796875 
+C 7.296875 5.796875 4.703125 5.796875 4.703125 2.90625 
+C 4.703125 0 7.296875 0 8.703125 0 
+L 34.203125 0 
+C 35.59375 0 38.203125 0 38.203125 2.90625 
+C 38.203125 5.796875 35.59375 5.796875 34.203125 5.796875 
+L 24.703125 5.796875 
+z
+" id="CMTT12-102"/>
+     <path d="M 45.59375 10.96875 
+C 45.59375 13.5625 43.09375 13.5625 42.296875 13.5625 
+C 40 13.5625 39.59375 12.765625 39.09375 11.375 
+C 36.90625 5.890625 32 5.296875 29.59375 5.296875 
+C 21.09375 5.296875 13.90625 12.375 13.90625 21.640625 
+C 13.90625 26.734375 16.796875 38.203125 29.90625 38.203125 
+C 32.59375 38.203125 34.703125 38 35.59375 37.90625 
+C 36.296875 37.703125 36.40625 37.578125 36.40625 37.078125 
+C 36.703125 32.921875 39.796875 32.921875 40.40625 32.921875 
+C 42.59375 32.921875 44.5 34.421875 44.5 37.109375 
+C 44.5 44 34.40625 44 30 44 
+C 12.90625 44 7.40625 30.03125 7.40625 21.640625 
+C 7.40625 9.484375 16.703125 -0.5 28.703125 -0.5 
+C 42.09375 -0.5 45.59375 9.375 45.59375 10.96875 
+z
+" id="CMTT12-99"/>
+     <path d="M 21.59375 37.203125 
+L 37.796875 37.203125 
+C 39.203125 37.203125 41.796875 37.203125 41.796875 40.109375 
+C 41.796875 43 39.203125 43 37.796875 43 
+L 21.59375 43 
+L 21.59375 51.328125 
+C 21.59375 53.15625 21.59375 55.5 18.40625 55.5 
+C 15.09375 55.5 15.09375 53.15625 15.09375 51.328125 
+L 15.09375 43 
+L 6.59375 43 
+C 5.203125 43 2.5 43 2.5 40.109375 
+C 2.5 37.203125 5.09375 37.203125 6.5 37.203125 
+L 15.09375 37.203125 
+L 15.09375 12.125 
+C 15.09375 2.875 21.5 -0.5 28.703125 -0.5 
+C 34.09375 -0.5 44 2.171875 44 12.34375 
+C 44 14.34375 44 16.53125 40.703125 16.53125 
+C 37.5 16.53125 37.5 14.34375 37.5 12.265625 
+C 37.40625 6.296875 31.703125 5.296875 29.40625 5.296875 
+C 21.59375 5.296875 21.59375 10.265625 21.59375 12.640625 
+z
+" id="CMTT12-116"/>
+     <path d="M 30.203125 56.5 
+C 30.203125 59 28.203125 61 25.703125 61 
+C 23.203125 61 21.203125 59 21.203125 56.5 
+C 21.203125 54 23.203125 52 25.703125 52 
+C 28.203125 52 30.203125 54 30.203125 56.5 
+z
+M 13 43 
+C 11.59375 43 9 43 9 40.109375 
+C 9 37.203125 11.59375 37.203125 13 37.203125 
+L 23.703125 37.203125 
+L 23.703125 5.796875 
+L 12.296875 5.796875 
+C 10.90625 5.796875 8.203125 5.796875 8.203125 2.90625 
+C 8.203125 0 10.90625 0 12.296875 0 
+L 40.09375 0 
+C 41.5 0 44.09375 0 44.09375 2.90625 
+C 44.09375 5.796875 41.5 5.796875 40.09375 5.796875 
+L 30.203125 5.796875 
+L 30.203125 38.921875 
+C 30.203125 42 29.59375 43 26.203125 43 
+z
+" id="CMTT12-105"/>
+     <path d="M 42.40625 37.203125 
+L 45 37.203125 
+C 46.40625 37.203125 49.09375 37.203125 49.09375 40.109375 
+C 49.09375 43 46.40625 43 45 43 
+L 34 43 
+C 32.59375 43 29.90625 43 29.90625 40.109375 
+C 29.90625 37.203125 32.59375 37.203125 34 37.203125 
+L 36.5 37.203125 
+L 25.703125 5.078125 
+L 14.90625 37.203125 
+L 17.40625 37.203125 
+C 18.796875 37.203125 21.5 37.203125 21.5 40.109375 
+C 21.5 43 18.796875 43 17.40625 43 
+L 6.40625 43 
+C 5 43 2.296875 43 2.296875 40.109375 
+C 2.296875 37.203125 5 37.203125 6.40625 37.203125 
+L 9 37.203125 
+L 20.5 2.890625 
+C 21.59375 -0.5 23.796875 -0.5 25.703125 -0.5 
+C 27.59375 -0.5 29.796875 -0.5 30.90625 2.890625 
+z
+" id="CMTT12-118"/>
+    </defs>
+    <g transform="translate(68.658545 53.766087)scale(0.07 -0.07)">
+     <use transform="translate(0 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular_Italic-110"/>
+     <use transform="translate(49.8132 0)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-72"/>
+     <use transform="translate(107.123069 15.050129)scale(0.996264)" xlink:href="#CMEX10-98"/>
+     <use transform="translate(165.998143 15.050129)scale(0.996264)" xlink:href="#CMTT12-67"/>
+     <use transform="translate(217.264254 15.050129)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(268.530365 15.050129)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(319.796475 15.050129)scale(0.996264)" xlink:href="#CMTT12-108"/>
+     <use transform="translate(404.790082 15.050129)scale(0.996264)" xlink:href="#CMTT12-103"/>
+     <use transform="translate(456.056192 15.050129)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(507.322303 15.050129)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(558.588414 15.050129)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(609.854525 15.050129)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(694.848131 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(746.114242 15.050129)scale(0.996264)" xlink:href="#CMTT12-102"/>
+     <use transform="translate(797.380353 15.050129)scale(0.996264)" xlink:href="#CMTT12-102"/>
+     <use transform="translate(848.646464 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(899.912575 15.050129)scale(0.996264)" xlink:href="#CMTT12-99"/>
+     <use transform="translate(951.178685 15.050129)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(1002.444796 15.050129)scale(0.996264)" xlink:href="#CMTT12-105"/>
+     <use transform="translate(1053.710907 15.050129)scale(0.996264)" xlink:href="#CMTT12-118"/>
+     <use transform="translate(1104.977018 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <path d="M 376.683597 15.050129 
+L 404.790082 15.050129 
+L 404.790082 15.448629 
+L 376.683597 15.448629 
+L 376.683597 15.050129 
+z
+"/>
+     <path d="M 666.741647 15.050129 
+L 694.848131 15.050129 
+L 694.848131 15.448629 
+L 666.741647 15.448629 
+L 666.741647 15.050129 
+z
+"/>
+    </g>
+   </g>
+   <g id="text_13">
+    <!-- $n_{\rm H}~\widehat{}~{\tt Jeans\_gamma\_effective}$ -->
+    <defs>
+     <path d="M 40 55.203125 
+L 42.703125 55.203125 
+C 44.09375 55.203125 46.796875 55.203125 46.796875 58.109375 
+C 46.796875 61 44.09375 61 42.703125 61 
+L 25.796875 61 
+C 24.40625 61 21.796875 61 21.796875 58.109375 
+C 21.796875 55.203125 24.40625 55.203125 25.796875 55.203125 
+L 33.5 55.203125 
+L 33.5 14.234375 
+C 33.5 5.515625 25.40625 4.90625 23.703125 4.90625 
+C 22.5 4.90625 17.5 4.90625 14.90625 7.984375 
+C 15.40625 8.59375 15.703125 9.46875 15.703125 10.375 
+C 15.703125 12.453125 14.09375 14.34375 11.703125 14.34375 
+C 9.40625 14.34375 7.59375 12.84375 7.59375 10.0625 
+C 7.59375 3.6875 14.40625 -1 23.59375 -1 
+C 32 -1 40 4.265625 40 13.53125 
+z
+" id="CMTT12-74"/>
+     <path d="M 41.796875 29.359375 
+C 41.796875 39.171875 37.203125 43.796875 29 43.796875 
+C 22.203125 43.796875 17.796875 40 16.09375 38.09375 
+C 16.09375 41.65625 16.09375 43 12.09375 43 
+L 5.5 43 
+C 4.09375 43 1.5 43 1.5 40.109375 
+C 1.5 37.203125 4.09375 37.203125 5.5 37.203125 
+L 9.59375 37.203125 
+L 9.59375 5.796875 
+L 5.5 5.796875 
+C 4.09375 5.796875 1.5 5.796875 1.5 2.90625 
+C 1.5 0 4.09375 0 5.5 0 
+L 20.203125 0 
+C 21.59375 0 24.203125 0 24.203125 2.90625 
+C 24.203125 5.796875 21.59375 5.796875 20.203125 5.796875 
+L 16.09375 5.796875 
+L 16.09375 23.953125 
+C 16.09375 33.671875 22.90625 38 28.296875 38 
+C 34.09375 38 35.296875 34.671875 35.296875 28.96875 
+L 35.296875 5.796875 
+L 31.203125 5.796875 
+C 29.796875 5.796875 27.203125 5.796875 27.203125 2.90625 
+C 27.203125 0 29.796875 0 31.203125 0 
+L 45.90625 0 
+C 47.296875 0 49.90625 0 49.90625 2.90625 
+C 49.90625 5.796875 47.296875 5.796875 45.90625 5.796875 
+L 41.796875 5.796875 
+z
+" id="CMTT12-110"/>
+     <path d="M 42 40.203125 
+C 42 41.796875 42 44 39.203125 44 
+C 36.796875 44 36.203125 41.703125 36.203125 41.59375 
+C 32.203125 44 27.59375 44 25.59375 44 
+C 9.296875 44 7.09375 35.71875 7.09375 32.328125 
+C 7.09375 28.234375 9.5 25.453125 12.90625 23.546875 
+C 16.09375 21.75 19 21.265625 27.203125 19.96875 
+C 31.09375 19.265625 39.09375 17.96875 39.09375 12.484375 
+C 39.09375 8.78125 35.703125 5.296875 26.40625 5.296875 
+C 20 5.296875 15.796875 7.796875 13.703125 15 
+C 13.203125 16.390625 12.90625 17.5 10.40625 17.5 
+C 7.09375 17.5 7.09375 15.59375 7.09375 13.59375 
+L 7.09375 3.296875 
+C 7.09375 1.6875 7.09375 -0.5 9.90625 -0.5 
+C 11.09375 -0.5 12 -0.5 13.5 3.59375 
+C 18.09375 -0.5 23.296875 -0.5 26.40625 -0.5 
+C 44.90625 -0.5 44.90625 11.46875 44.90625 12.46875 
+C 44.90625 22.828125 32.5 24.9375 27.703125 25.625 
+C 18.90625 27.125 12.90625 28.125 12.90625 32.3125 
+C 12.90625 35.015625 16 38.203125 25.40625 38.203125 
+C 34.90625 38.203125 35.296875 33.703125 35.5 31.09375 
+C 35.703125 29.09375 37.5 28.796875 38.703125 28.796875 
+C 42 28.796875 42 30.59375 42 32.59375 
+z
+" id="CMTT12-115"/>
+    </defs>
+    <g transform="translate(158.822024 66.279355)rotate(-43)scale(0.07 -0.07)">
+     <use transform="translate(0 15.050129)scale(0.996264)" xlink:href="#Nimbus_Roman_No9_L_Regular_Italic-110"/>
+     <use transform="translate(49.8132 0)scale(0.737241)" xlink:href="#Nimbus_Roman_No9_L_Regular-72"/>
+     <use transform="translate(107.123069 15.050129)scale(0.996264)" xlink:href="#CMEX10-98"/>
+     <use transform="translate(165.998143 15.050129)scale(0.996264)" xlink:href="#CMTT12-74"/>
+     <use transform="translate(217.264254 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(268.530365 15.050129)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(319.796475 15.050129)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(371.062586 15.050129)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(456.056192 15.050129)scale(0.996264)" xlink:href="#CMTT12-103"/>
+     <use transform="translate(507.322303 15.050129)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(558.588414 15.050129)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(609.854525 15.050129)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(661.120636 15.050129)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(746.114242 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(797.380353 15.050129)scale(0.996264)" xlink:href="#CMTT12-102"/>
+     <use transform="translate(848.646464 15.050129)scale(0.996264)" xlink:href="#CMTT12-102"/>
+     <use transform="translate(899.912575 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(951.178685 15.050129)scale(0.996264)" xlink:href="#CMTT12-99"/>
+     <use transform="translate(1002.444796 15.050129)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(1053.710907 15.050129)scale(0.996264)" xlink:href="#CMTT12-105"/>
+     <use transform="translate(1104.977018 15.050129)scale(0.996264)" xlink:href="#CMTT12-118"/>
+     <use transform="translate(1156.243129 15.050129)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <path d="M 427.949708 15.050129 
+L 456.056192 15.050129 
+L 456.056192 15.448629 
+L 427.949708 15.448629 
+L 427.949708 15.050129 
+z
+"/>
+     <path d="M 718.007758 15.050129 
+L 746.114242 15.050129 
+L 746.114242 15.448629 
+L 718.007758 15.448629 
+L 718.007758 15.050129 
+z
+"/>
+    </g>
+   </g>
+   <g id="text_14">
+    <!-- ${\tt Cool\_density\_threshold\_H\_p\_cm3}$ -->
+    <defs>
+     <path d="M 41.796875 56.90625 
+C 41.796875 60 41.203125 61 37.796875 61 
+L 31.203125 61 
+C 29.796875 61 27.203125 61 27.203125 58.109375 
+C 27.203125 55.203125 29.796875 55.203125 31.203125 55.203125 
+L 35.296875 55.203125 
+L 35.296875 38.796875 
+C 33.703125 40.390625 29.59375 43.796875 23.5 43.796875 
+C 12.90625 43.796875 3.703125 34.1875 3.703125 21.59375 
+C 3.703125 9.296875 12.296875 -0.5 22.59375 -0.5 
+C 29.09375 -0.5 33.40625 3.359375 35.296875 5.546875 
+C 35.296875 1.3125 35.296875 0 39.296875 0 
+L 45.90625 0 
+C 47.296875 0 49.90625 0 49.90625 2.90625 
+C 49.90625 5.796875 47.296875 5.796875 45.90625 5.796875 
+L 41.796875 5.796875 
+z
+M 35.296875 19.09375 
+C 35.296875 13.5 30.703125 5.296875 23.203125 5.296875 
+C 16 5.296875 10.203125 12.59375 10.203125 21.59375 
+C 10.203125 31.1875 17 38 24.09375 38 
+C 30.59375 38 35.296875 32.09375 35.296875 26.890625 
+z
+" id="CMTT12-100"/>
+     <path d="M 42.40625 37.203125 
+L 45 37.203125 
+C 46.40625 37.203125 49.09375 37.203125 49.09375 40.109375 
+C 49.09375 43 46.40625 43 45 43 
+L 34 43 
+C 32.59375 43 29.90625 43 29.90625 40.109375 
+C 29.90625 37.203125 32.59375 37.203125 34 37.203125 
+L 36.5 37.203125 
+C 33.703125 29.0625 28 12.515625 26.59375 6.859375 
+L 26.5 6.859375 
+C 26 9.046875 25.59375 10.125 24.59375 12.8125 
+L 15.296875 37.203125 
+L 17.59375 37.203125 
+C 19 37.203125 21.703125 37.203125 21.703125 40.109375 
+C 21.703125 43 19 43 17.59375 43 
+L 6.59375 43 
+C 5.203125 43 2.5 43 2.5 40.109375 
+C 2.5 37.203125 5.203125 37.203125 6.59375 37.203125 
+L 9.296875 37.203125 
+L 23.296875 1.46875 
+C 23.703125 0.46875 23.703125 0.265625 23.703125 0.171875 
+C 23.703125 -0.03125 21.09375 -8.671875 19.59375 -11.4375 
+C 19 -12.4375 16.5 -17.109375 11.703125 -16.609375 
+C 11.796875 -16.296875 12.09375 -15.703125 12.09375 -14.609375 
+C 12.09375 -12.296875 10.5 -10.703125 8.203125 -10.703125 
+C 5.703125 -10.703125 4.203125 -12.40625 4.203125 -14.703125 
+C 4.203125 -18.5 7.40625 -22.5 12.40625 -22.5 
+C 22.09375 -22.5 26.40625 -9.6875 26.703125 -8.890625 
+z
+" id="CMTT12-121"/>
+     <path d="M 41.796875 29.359375 
+C 41.796875 39.171875 37.203125 43.796875 29 43.796875 
+C 22.203125 43.796875 17.796875 40 16.09375 38.09375 
+L 16.09375 56.9375 
+C 16.09375 60.015625 15.5 61 12.09375 61 
+L 5.5 61 
+C 4.09375 61 1.5 61 1.5 58.109375 
+C 1.5 55.203125 4.09375 55.203125 5.5 55.203125 
+L 9.59375 55.203125 
+L 9.59375 5.796875 
+L 5.5 5.796875 
+C 4.09375 5.796875 1.5 5.796875 1.5 2.90625 
+C 1.5 0 4.09375 0 5.5 0 
+L 20.203125 0 
+C 21.59375 0 24.203125 0 24.203125 2.90625 
+C 24.203125 5.796875 21.59375 5.796875 20.203125 5.796875 
+L 16.09375 5.796875 
+L 16.09375 23.953125 
+C 16.09375 33.671875 22.90625 38 28.296875 38 
+C 34.09375 38 35.296875 34.671875 35.296875 28.96875 
+L 35.296875 5.796875 
+L 31.203125 5.796875 
+C 29.796875 5.796875 27.203125 5.796875 27.203125 2.90625 
+C 27.203125 0 29.796875 0 31.203125 0 
+L 45.90625 0 
+C 47.296875 0 49.90625 0 49.90625 2.90625 
+C 49.90625 5.796875 47.296875 5.796875 45.90625 5.796875 
+L 41.796875 5.796875 
+z
+" id="CMTT12-104"/>
+     <path d="M 21.59375 18.9375 
+C 21.59375 30.875 30 38 38.5 38 
+C 39 38 39.59375 38 40.09375 37.890625 
+C 40.40625 34.140625 43.203125 33.828125 43.90625 33.828125 
+C 46.203125 33.828125 47.796875 35.546875 47.796875 37.78125 
+C 47.796875 42.15625 43.59375 43.796875 38.59375 43.796875 
+C 31.90625 43.796875 26.203125 40.734375 21.59375 35.015625 
+L 21.59375 38.921875 
+C 21.59375 42 21 43 17.59375 43 
+L 7.5 43 
+C 6.09375 43 3.5 43 3.5 40.109375 
+C 3.5 37.203125 6.09375 37.203125 7.5 37.203125 
+L 15.09375 37.203125 
+L 15.09375 5.796875 
+L 7.5 5.796875 
+C 6.09375 5.796875 3.5 5.796875 3.5 2.90625 
+C 3.5 0 6.09375 0 7.5 0 
+L 32.09375 0 
+C 33.5 0 36.09375 0 36.09375 2.90625 
+C 36.09375 5.796875 33.5 5.796875 32.09375 5.796875 
+L 21.59375 5.796875 
+z
+" id="CMTT12-114"/>
+     <path d="M 42.796875 55.203125 
+L 45 55.203125 
+C 46.40625 55.203125 49 55.203125 49 58.109375 
+C 49 61 46.5 61 45 61 
+L 34.09375 61 
+C 32.703125 61 30 61 30 58.109375 
+C 30 55.203125 32.703125 55.203125 34.09375 55.203125 
+L 36.296875 55.203125 
+L 36.296875 34.796875 
+L 15.09375 34.796875 
+L 15.09375 55.203125 
+L 17.296875 55.203125 
+C 18.703125 55.203125 21.40625 55.203125 21.40625 58.109375 
+C 21.40625 61 18.703125 61 17.296875 61 
+L 6.40625 61 
+C 4.90625 61 2.40625 61 2.40625 58.109375 
+C 2.40625 55.203125 5 55.203125 6.40625 55.203125 
+L 8.59375 55.203125 
+L 8.59375 5.796875 
+L 6.40625 5.796875 
+C 5 5.796875 2.40625 5.796875 2.40625 2.90625 
+C 2.40625 0 4.90625 0 6.40625 0 
+L 17.296875 0 
+C 18.703125 0 21.40625 0 21.40625 2.90625 
+C 21.40625 5.796875 18.703125 5.796875 17.296875 5.796875 
+L 15.09375 5.796875 
+L 15.09375 29 
+L 36.296875 29 
+L 36.296875 5.796875 
+L 34.09375 5.796875 
+C 32.703125 5.796875 30 5.796875 30 2.90625 
+C 30 0 32.703125 0 34.09375 0 
+L 45 0 
+C 46.5 0 49 0 49 2.90625 
+C 49 5.796875 46.40625 5.796875 45 5.796875 
+L 42.796875 5.796875 
+z
+" id="CMTT12-72"/>
+     <path d="M 20.203125 -22 
+C 21.59375 -22 24.203125 -22 24.203125 -19.09375 
+C 24.203125 -16.203125 21.59375 -16.203125 20.203125 -16.203125 
+L 16.09375 -16.203125 
+L 16.09375 5.09375 
+C 18.5 2.390625 22.203125 -0.5 27.796875 -0.5 
+C 38.40625 -0.5 47.703125 9.046875 47.703125 21.796875 
+C 47.703125 34.15625 39.296875 44 28.90625 44 
+C 21.59375 44 17 39.578125 16.09375 38.578125 
+C 16.09375 41.625 16.09375 43 12.09375 43 
+L 5.5 43 
+C 4.09375 43 1.5 43 1.5 40.109375 
+C 1.5 37.203125 4.09375 37.203125 5.5 37.203125 
+L 9.59375 37.203125 
+L 9.59375 -16.203125 
+L 5.5 -16.203125 
+C 4.09375 -16.203125 1.5 -16.203125 1.5 -19.09375 
+C 1.5 -22 4.09375 -22 5.5 -22 
+z
+M 16.09375 26.828125 
+C 16.09375 32.5625 21.703125 38.203125 28.203125 38.203125 
+C 35.40625 38.203125 41.203125 30.84375 41.203125 21.796875 
+C 41.203125 12.140625 34.40625 5.296875 27.296875 5.296875 
+C 19.796875 5.296875 16.09375 13.84375 16.09375 18.875 
+z
+" id="CMTT12-112"/>
+     <path d="M 25.59375 30.59375 
+C 36 30.59375 40.59375 23.21875 40.59375 17.765625 
+C 40.59375 11.078125 34.796875 4.796875 26 4.796875 
+C 16 4.796875 11.5 10.25 11.5 11.671875 
+C 11.5 11.859375 11.59375 12.0625 11.703125 12.171875 
+C 12.09375 12.875 12.40625 13.703125 12.40625 14.609375 
+C 12.40625 16.734375 10.796875 18.65625 8.40625 18.65625 
+C 6.296875 18.65625 4.296875 17.34375 4.296875 14.234375 
+C 4.296875 5.453125 13.796875 -1 26 -1 
+C 38.59375 -1 47.09375 8.078125 47.09375 17.65625 
+C 47.09375 22.8125 44.296875 29.96875 35.5 33.796875 
+C 41.59375 37.609375 44.296875 43.625 44.296875 48.84375 
+C 44.296875 56.375 36.796875 62.890625 26 62.890625 
+C 14.90625 62.890625 7.09375 58.078125 7.09375 50.65625 
+C 7.09375 47.34375 9.59375 46.34375 11.203125 46.34375 
+C 13 46.34375 15.203125 47.765625 15.203125 50.515625 
+C 15.203125 52.125 14.40625 53.046875 14.40625 53.140625 
+C 17.40625 57 24.296875 57 26 57 
+C 32.796875 57 37.796875 53.3125 37.796875 48.484375 
+C 37.796875 45.515625 35.90625 37 25.40625 37 
+C 21.703125 37 20.09375 36.796875 19.703125 36.796875 
+C 17.703125 36.578125 17.203125 35.203125 17.203125 33.703125 
+C 17.203125 30.59375 19.296875 30.59375 21 30.59375 
+z
+" id="CMTT12-51"/>
+    </defs>
+    <g transform="translate(75.778884 192.590474)rotate(-90)scale(0.07 -0.07)">
+     <use transform="scale(0.996264)" xlink:href="#CMTT12-67"/>
+     <use transform="translate(51.266111 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(102.532222 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(153.798333 0)scale(0.996264)" xlink:href="#CMTT12-108"/>
+     <use transform="translate(238.791939 0)scale(0.996264)" xlink:href="#CMTT12-100"/>
+     <use transform="translate(290.05805 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(341.32416 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(392.590271 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(443.856382 0)scale(0.996264)" xlink:href="#CMTT12-105"/>
+     <use transform="translate(495.122493 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(546.388604 0)scale(0.996264)" xlink:href="#CMTT12-121"/>
+     <use transform="translate(631.38221 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(682.648321 0)scale(0.996264)" xlink:href="#CMTT12-104"/>
+     <use transform="translate(733.914432 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(785.180542 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(836.446653 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(887.712764 0)scale(0.996264)" xlink:href="#CMTT12-104"/>
+     <use transform="translate(938.978875 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(990.244986 0)scale(0.996264)" xlink:href="#CMTT12-108"/>
+     <use transform="translate(1041.511097 0)scale(0.996264)" xlink:href="#CMTT12-100"/>
+     <use transform="translate(1126.504703 0)scale(0.996264)" xlink:href="#CMTT12-72"/>
+     <use transform="translate(1211.498309 0)scale(0.996264)" xlink:href="#CMTT12-112"/>
+     <use transform="translate(1296.491915 0)scale(0.996264)" xlink:href="#CMTT12-99"/>
+     <use transform="translate(1347.758026 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(1399.024137 0)scale(0.996264)" xlink:href="#CMTT12-51"/>
+     <path d="M 210.685454 0 
+L 238.791939 0 
+L 238.791939 0.3985 
+L 210.685454 0.3985 
+L 210.685454 0 
+z
+"/>
+     <path d="M 603.275726 0 
+L 631.38221 0 
+L 631.38221 0.3985 
+L 603.275726 0.3985 
+L 603.275726 0 
+z
+"/>
+     <path d="M 1098.398219 0 
+L 1126.504703 0 
+L 1126.504703 0.3985 
+L 1098.398219 0.3985 
+L 1098.398219 0 
+z
+"/>
+     <path d="M 1183.391825 0 
+L 1211.498309 0 
+L 1211.498309 0.3985 
+L 1183.391825 0.3985 
+L 1183.391825 0 
+z
+"/>
+     <path d="M 1268.385431 0 
+L 1296.491915 0 
+L 1296.491915 0.3985 
+L 1268.385431 0.3985 
+L 1268.385431 0 
+z
+"/>
+    </g>
+   </g>
+   <g id="text_15">
+    <!-- ${\tt Jeans\_density\_threshold\_H\_p\_cm3}$ -->
+    <g transform="translate(145.055974 192.590474)rotate(-90)scale(0.07 -0.07)">
+     <use transform="scale(0.996264)" xlink:href="#CMTT12-74"/>
+     <use transform="translate(51.266111 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(102.532222 0)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(153.798333 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(205.064443 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(290.05805 0)scale(0.996264)" xlink:href="#CMTT12-100"/>
+     <use transform="translate(341.32416 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(392.590271 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(443.856382 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(495.122493 0)scale(0.996264)" xlink:href="#CMTT12-105"/>
+     <use transform="translate(546.388604 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(597.654715 0)scale(0.996264)" xlink:href="#CMTT12-121"/>
+     <use transform="translate(682.648321 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(733.914432 0)scale(0.996264)" xlink:href="#CMTT12-104"/>
+     <use transform="translate(785.180542 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(836.446653 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(887.712764 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(938.978875 0)scale(0.996264)" xlink:href="#CMTT12-104"/>
+     <use transform="translate(990.244986 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(1041.511097 0)scale(0.996264)" xlink:href="#CMTT12-108"/>
+     <use transform="translate(1092.777208 0)scale(0.996264)" xlink:href="#CMTT12-100"/>
+     <use transform="translate(1177.770814 0)scale(0.996264)" xlink:href="#CMTT12-72"/>
+     <use transform="translate(1262.76442 0)scale(0.996264)" xlink:href="#CMTT12-112"/>
+     <use transform="translate(1347.758026 0)scale(0.996264)" xlink:href="#CMTT12-99"/>
+     <use transform="translate(1399.024137 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(1450.290248 0)scale(0.996264)" xlink:href="#CMTT12-51"/>
+     <path d="M 261.951565 0 
+L 290.05805 0 
+L 290.05805 0.3985 
+L 261.951565 0.3985 
+L 261.951565 0 
+z
+"/>
+     <path d="M 654.541837 0 
+L 682.648321 0 
+L 682.648321 0.3985 
+L 654.541837 0.3985 
+L 654.541837 0 
+z
+"/>
+     <path d="M 1149.664329 0 
+L 1177.770814 0 
+L 1177.770814 0.3985 
+L 1149.664329 0.3985 
+L 1149.664329 0 
+z
+"/>
+     <path d="M 1234.657936 0 
+L 1262.76442 0 
+L 1262.76442 0.3985 
+L 1234.657936 0.3985 
+L 1234.657936 0 
+z
+"/>
+     <path d="M 1319.651542 0 
+L 1347.758026 0 
+L 1347.758026 0.3985 
+L 1319.651542 0.3985 
+L 1319.651542 0 
+z
+"/>
+    </g>
+   </g>
+   <g id="text_16">
+    <!-- ${\tt Cool\_temperature\_norm\_K}$ -->
+    <defs>
+     <path d="M 41.796875 38.921875 
+C 41.796875 42 41.203125 43 37.796875 43 
+L 31.203125 43 
+C 29.796875 43 27.203125 43 27.203125 40.109375 
+C 27.203125 37.203125 29.796875 37.203125 31.203125 37.203125 
+L 35.296875 37.203125 
+L 35.296875 15.625 
+C 35.296875 7.484375 28.5 5.296875 24 5.296875 
+C 16.09375 5.296875 16.09375 8.875 16.09375 11.96875 
+L 16.09375 38.921875 
+C 16.09375 42 15.5 43 12.09375 43 
+L 5.5 43 
+C 4.09375 43 1.5 43 1.5 40.109375 
+C 1.5 37.203125 4.09375 37.203125 5.5 37.203125 
+L 9.59375 37.203125 
+L 9.59375 11.4375 
+C 9.59375 2.28125 15.59375 -0.5 23.296875 -0.5 
+C 29.40625 -0.5 33.203125 1.984375 35.203125 3.671875 
+C 35.203125 0 37.59375 0 39.296875 0 
+L 45.90625 0 
+C 47.296875 0 49.90625 0 49.90625 2.90625 
+C 49.90625 5.796875 47.296875 5.796875 45.90625 5.796875 
+L 41.796875 5.796875 
+z
+" id="CMTT12-117"/>
+     <path d="M 27.09375 35.734375 
+L 42.40625 55.203125 
+C 45.296875 55.203125 47.59375 55.203125 47.59375 58.109375 
+C 47.59375 61 45 61 43.59375 61 
+L 35.203125 61 
+C 33.796875 61 31.203125 61 31.203125 58.109375 
+C 31.203125 55.203125 33.796875 55.203125 35.203125 55.203125 
+L 14.203125 28.453125 
+L 14.203125 55.203125 
+L 16.203125 55.203125 
+C 17.59375 55.203125 20.203125 55.203125 20.203125 58.109375 
+C 20.203125 61 17.59375 61 16.203125 61 
+L 6.59375 61 
+C 5.203125 61 2.59375 61 2.59375 58.109375 
+C 2.59375 55.203125 5.203125 55.203125 6.59375 55.203125 
+L 8.59375 55.203125 
+L 8.59375 5.796875 
+L 6.59375 5.796875 
+C 5.203125 5.796875 2.59375 5.796875 2.59375 2.90625 
+C 2.59375 0 5.203125 0 6.59375 0 
+L 16.203125 0 
+C 17.59375 0 20.203125 0 20.203125 2.90625 
+C 20.203125 5.796875 17.59375 5.796875 16.203125 5.796875 
+L 14.203125 5.796875 
+L 14.203125 19.375 
+L 23.296875 30.953125 
+L 37.203125 5.796875 
+C 35.90625 5.796875 33.296875 5.796875 33.296875 2.90625 
+C 33.296875 0 35.90625 0 37.296875 0 
+L 44.5 0 
+C 45.90625 0 48.5 0 48.5 2.90625 
+C 48.5 5.796875 46.09375 5.796875 43.59375 5.796875 
+z
+" id="CMTT12-75"/>
+    </defs>
+    <g transform="translate(37.862259 66.86623)scale(0.07 -0.07)">
+     <use transform="scale(0.996264)" xlink:href="#CMTT12-67"/>
+     <use transform="translate(51.266111 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(102.532222 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(153.798333 0)scale(0.996264)" xlink:href="#CMTT12-108"/>
+     <use transform="translate(238.791939 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(290.05805 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(341.32416 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(392.590271 0)scale(0.996264)" xlink:href="#CMTT12-112"/>
+     <use transform="translate(443.856382 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(495.122493 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(546.388604 0)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(597.654715 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(648.920825 0)scale(0.996264)" xlink:href="#CMTT12-117"/>
+     <use transform="translate(700.186936 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(751.453047 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(836.446653 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(887.712764 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(938.978875 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(990.244986 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(1075.238592 0)scale(0.996264)" xlink:href="#CMTT12-75"/>
+     <path d="M 210.685454 0 
+L 238.791939 0 
+L 238.791939 0.3985 
+L 210.685454 0.3985 
+L 210.685454 0 
+z
+"/>
+     <path d="M 808.340169 0 
+L 836.446653 0 
+L 836.446653 0.3985 
+L 808.340169 0.3985 
+L 808.340169 0 
+z
+"/>
+     <path d="M 1047.132108 0 
+L 1075.238592 0 
+L 1075.238592 0.3985 
+L 1047.132108 0.3985 
+L 1047.132108 0 
+z
+"/>
+    </g>
+   </g>
+   <g id="text_17">
+    <!-- ${\tt Jeans\_temperature\_norm\_K}$ -->
+    <g transform="translate(37.862259 81.545055)scale(0.07 -0.07)">
+     <use transform="scale(0.996264)" xlink:href="#CMTT12-74"/>
+     <use transform="translate(51.266111 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(102.532222 0)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(153.798333 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(205.064443 0)scale(0.996264)" xlink:href="#CMTT12-115"/>
+     <use transform="translate(290.05805 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(341.32416 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(392.590271 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(443.856382 0)scale(0.996264)" xlink:href="#CMTT12-112"/>
+     <use transform="translate(495.122493 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(546.388604 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(597.654715 0)scale(0.996264)" xlink:href="#CMTT12-97"/>
+     <use transform="translate(648.920825 0)scale(0.996264)" xlink:href="#CMTT12-116"/>
+     <use transform="translate(700.186936 0)scale(0.996264)" xlink:href="#CMTT12-117"/>
+     <use transform="translate(751.453047 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(802.719158 0)scale(0.996264)" xlink:href="#CMTT12-101"/>
+     <use transform="translate(887.712764 0)scale(0.996264)" xlink:href="#CMTT12-110"/>
+     <use transform="translate(938.978875 0)scale(0.996264)" xlink:href="#CMTT12-111"/>
+     <use transform="translate(990.244986 0)scale(0.996264)" xlink:href="#CMTT12-114"/>
+     <use transform="translate(1041.511097 0)scale(0.996264)" xlink:href="#CMTT12-109"/>
+     <use transform="translate(1126.504703 0)scale(0.996264)" xlink:href="#CMTT12-75"/>
+     <path d="M 261.951565 0 
+L 290.05805 0 
+L 290.05805 0.3985 
+L 261.951565 0.3985 
+L 261.951565 0 
+z
+"/>
+     <path d="M 859.60628 0 
+L 887.712764 0 
+L 887.712764 0.3985 
+L 859.60628 0.3985 
+L 859.60628 0 
+z
+"/>
+     <path d="M 1098.398219 0 
+L 1126.504703 0 
+L 1126.504703 0.3985 
+L 1098.398219 0.3985 
+L 1098.398219 0 
+z
+"/>
+    </g>
+   </g>
+  </g>
+ </g>
+ <defs>
+  <clipPath id="p107a2e5a22">
+   <rect height="195.048" width="190.512" x="34.02" y="2.268"/>
+  </clipPath>
+ </defs>
+</svg>
diff --git a/doc/RTD/source/SubgridModels/EAGLE/index.rst b/doc/RTD/source/SubgridModels/EAGLE/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..620572f962948308884414622ca2ae7d555d158f
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/EAGLE/index.rst
@@ -0,0 +1,426 @@
+.. EAGLE sub-grid model
+   Matthieu Schaller, 20th December 2018
+
+
+EAGLE model
+===========
+
+This section of the documentation gives a brief description of the
+different components of the EAGLE sub-grid model. We mostly focus on
+the parameters and values output in the snapshots.
+
+.. _EAGLE_entropy_floors:
+
+Entropy floors
+~~~~~~~~~~~~~~
+
+The gas particles in the EAGLE model are prevented from cooling below a
+certain temperature. The temperature limit depends on the density of the
+particles. Two floors are used in conjunction. Both are implemented as
+polytropic "equations of states" :math:`P = P_c
+\left(\rho/\rho_c\right)^\gamma`, with the constants derived from the user
+input given in terms of temperature and Hydrogen number density.
+
+The first limit, labelled as ``Cool``, is typically used to prevent
+low-density high-metallicity particles to cool below the warm phase because
+of over-cooling induced by the absence of metal diffusion. This limit plays
+only a small role in practice. The second limit, labelled as ``Jeans``, is
+used to prevent the fragmentation of high-density gas into clumps that
+cannot be resolved by the coupled hydro+gravity solver. The two limits are
+sketched on the following figure. An additional over-density criterion is
+applied to prevent gas not collapsed into structures from being
+affected. This criterion demands that :math:`\rho > \Delta_{\rm floor}
+\Omega_b \rho_{\rm crit}`, with :math:`\Delta_{\rm floor}` specified by the
+user and :math:`\rho_{\rm crit}` the critical density at that redshift
+[#f1]_.
+
+.. figure:: EAGLE_entropy_floor.svg
+    :width: 400px
+    :align: center
+    :figclass: align-center
+    :alt: Phase-space diagram displaying the two entropy floors used
+	  in the EAGLE model.
+
+    Temperature-density plane with the two entropy floors used in the EAGLE
+    model indicated by the black lines. Gas particles are not allowed to be
+    below either of these two floors; they are hence forbidden to enter the
+    grey-shaded region. The floors are specified by the position in the
+    plane of the starting point of each line (black circle) and their slope
+    (dashed lines). The parameter names governing the behaviour of the
+    floors are indicated on the figure. Note that unlike what is shown on
+    the figure for clarity reasons, typical values for EAGLE runs place
+    both anchors at the same temperature.
+
+
+The model is governed by 4 parameters for each of the two
+limits. These are given in the ``EAGLEEntropyFloor`` section of the
+YAML file. The parameters are the Hydrogen number density (in
+:math:`cm^{-3}`) and temperature (in :math:`K`) of the anchor point of
+each floor as well as the power-law slope of each floor and the
+minimal over-density required to apply the limit. For a normal
+EAGLE run, that section of the parameter file reads:
+
+.. code:: YAML
+
+  EAGLEEntropyFloor:
+     Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in, expressed in Hydrogen atoms per cm^3.
+     Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+     Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold, expressed in Kelvin.
+     Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+     Cool_density_threshold_H_p_cm3:  1e-5      # Physical density above which the EAGLE Cool limiter entropy floor kicks in, expressed in Hydrogen atoms per cm^3.
+     Cool_over_density_threshold:     10.       # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+     Cool_temperature_norm_K:         8000      # Temperature of the EAGLE Cool limiter entropy floor at the density threshold, expressed in Kelvin.
+     Cool_gamma_effective:            1.        # Slope of the EAGLE Cool limiter entropy floor
+
+SWIFT will convert the temperature normalisations and Hydrogen number
+density thresholds into internal energies and densities respectively
+assuming a neutral gas with primordial abundance pattern. This implies
+that the floor may not be exactly at the position given in the YAML
+file if the gas has different properties. This is especially the case
+for the temperature limit which will often be lower than the imposed
+floor by a factor :math:`\frac{\mu_{\rm neutral}}{\mu_{\rm ionised}}
+\approx \frac{1.22}{0.59} \approx 2` due to the different ionisation
+states of the gas.
+
+Note that the model only makes sense if the ``Cool`` threshold is at a lower
+density than the ``Jeans`` threshold.
+
+.. _EAGLE_chemical_tracers:
+
+Chemical tracers
+~~~~~~~~~~~~~~~~
+
+The gas particles in the EAGLE model carry metal abundance information in the
+form of metal mass fractions. We follow explicitly 9 of the 11 elements that
+`Wiersma et al. (2009)b <http://adsabs.harvard.edu/abs/2009MNRAS.399..574W>`_
+traced in their chemical enrichment model. These are: `H`, `He`, `C`, `N`, `O`,
+`Ne`, `Mg`, `Si` and `Fe` [#f2]_. We additionally follow the total metal mass fraction
+(i.e. absolute metallicity) `Z`. This is typically larger than the sum of the 7
+metals that are individually traced since this will also contain the
+contribution of all the elements that are not individually followed.  We note
+that all of these definitions are independent of any definition of the solar
+metallicity :math:`Z_\odot` or of any solar abundance pattern.
+
+As part of the diagnostics, we additionally trace the elements coming
+from the different stellar evolution channels. We store for each
+particle the total mass coming from all the SNIa that enriched that
+particle and the metal mass fraction from SNIa. This is the fraction
+of the *total* gas mass that is in the form of metals originating from
+SNIa stars. By construction this fraction will be smaller than the
+total metal mass fraction. The same tracers exist for the SNII and AGB
+channels. Finally, we also compute the iron gas fraction from
+SNIa. This is the fraction of the *total* gas mass that is made of
+iron originating from SNIa explosions. 
+
+We finally also compute the smoothed versions of the individual
+element mass fractions, of the total metal mass fractions, and of the
+iron gas fraction from SNIa.
+
+The chemistry module in ``src/chemistry/EAGLE/`` includes all the arrays
+that are added to the particles and the functions used to compute the
+smoothed elements.
+
+When a star is formed (see the section :ref:`EAGLE_star_formation` below), it
+inherits all the chemical tracers of its parent gas particle.
+
+In the snapshots, we output for each gas and star particle:
+
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| Name                             | Description                         | Units     | Comments                    |
++==================================+=====================================+===========+=============================+
+| ``ElementAbundance``             | | Fraction of the gas/star mass     | [-]       | | Array of length           |
+|                                  | | in the different elements         |           | | 9 for each particle       |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``SmoothedElementAbundance``     | | Fraction of the gas/star mass     | [-]       | | Array of length           |
+|                                  | | in the different elements         |           | | 9 for each particle       |
+|                                  | | smoothed over SPH neighbours      |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``Metallicity``                  | | Fraction of the gas/star mass     | [-]       |                             |
+|                                  | | in *all* metals                   |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``SmoothedMetallicity``          | | Fraction of the gas/star mass     | [-]       |                             |
+|                                  | | in *all* metals                   |           |                             |
+|                                  | | smoothed over SPH neighbours      |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``TotalMassFromSNIa``            | | Total mass of the gas/star        | [U_M]     |                             |
+|                                  | | that was produced by enrichment   |           |                             |
+|                                  | | from SNIa stars                   |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``MetalMassFracFromSNIa``        | | Fraction of the *total* gas/star  | [-]       |                             |
+|                                  | | mass that is in metals produced   |           |                             |
+|                                  | | by enrichment from SNIa stars     |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``TotalMassFromAGB``             | | Total mass of the gas/star        | [U_M]     |                             |
+|                                  | | that was produced by enrichment   |           |                             |
+|                                  | | from AGB stars                    |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``MetalMassFracFromAGB``         | | Fraction of the *total* gas/star  | [-]       |                             |
+|                                  | | mass that is in metals produced   |           |                             |
+|                                  | | by enrichment from AGB stars      |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``TotalMassFromSNII``            | | Total mass of the gas/star        | [U_M]     |                             |
+|                                  | | that was produced by enrichment   |           |                             |
+|                                  | | from SNII stars                   |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``MetalMassFracFromSNII``        | | Fraction of the gas/star mass     | [-]       |                             |
+|                                  | | that is in metals produced by     |           |                             |
+|                                  | | enrichment from SNII stars        |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``IronMassFracFromSNIa``         | | Fraction of the *total* gas/star  | [-]       |                             |
+|                                  | | mass in *iron* produced           |           |                             |
+|                                  | | by enrichment from SNIa stars     |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``SmoothedIronMassFracFromSNIa`` | | Fraction of the *total* gas/star  | [-]       |                             |
+|                                  | | mass in *iron* produced           |           |                             |
+|                                  | | by enrichment from SNIa stars     |           |                             |
+|                                  | | smoothed over SPH neighbours      |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+
+The stars will lose mass over their lifetime (up to ~45%). The fractions will
+remain unchanged but if one is interested in computing an absolute metal mass
+(say) for a star, the ``InitialMass`` (see the section
+:ref:`EAGLE_star_formation` below) of the star must be used.
+
+The chemistry model only requires a small number of parameters to be specified
+in the `EAGLEChemistry` section of the YAML file. These are the initial values
+of the metallicity and element mass fractions. These are then applied at the
+start of a simulation to *all* the *gas* particles. All 9 traced elements have
+to be specified. An example section, for primordial abundances (typical for a
+cosmological run), is:
+
+.. code:: YAML
+
+   EAGLEChemistry:
+     init_abundance_metal:        0.    # Mass fraction in *all* metals
+     init_abundance_Hydrogen:     0.755 # Mass fraction in Hydrogen
+     init_abundance_Helium:       0.245 # Mass fraction in Helium
+     init_abundance_Carbon:       0.    # Mass fraction in Carbon
+     init_abundance_Nitrogen:     0.    # Mass fraction in Nitrogen
+     init_abundance_Oxygen:       0.    # Mass fraction in Oxygen
+     init_abundance_Neon:         0.    # Mass fraction in Neon
+     init_abundance_Magnesium:    0.    # Mass fraction in Magnesium
+     init_abundance_Silicon:      0.    # Mass fraction in Silicon
+     init_abundance_Iron:         0.    # Mass fraction in Iron
+
+Whilst one would use the following values for solar abundances
+(typical for an idealised low-redshift run):
+
+.. code:: YAML
+
+   EAGLEChemistry:
+     init_abundance_metal:        0.014        # Mass fraction in *all* metals
+     init_abundance_Hydrogen:     0.70649785   # Mass fraction in Hydrogen
+     init_abundance_Helium:       0.28055534   # Mass fraction in Helium
+     init_abundance_Carbon:       2.0665436e-3 # Mass fraction in Carbon
+     init_abundance_Nitrogen:     8.3562563e-4 # Mass fraction in Nitrogen
+     init_abundance_Oxygen:       5.4926244e-3 # Mass fraction in Oxygen
+     init_abundance_Neon:         1.4144605e-3 # Mass fraction in Neon
+     init_abundance_Magnesium:    5.907064e-4  # Mass fraction in Magnesium
+     init_abundance_Silicon:      6.825874e-4  # Mass fraction in Silicon
+     init_abundance_Iron:         1.1032152e-3 # Mass fraction in Iron
+
+Individual element abundances for each particle can also be read
+directly from the ICs. By default these are overwritten in the code by
+the values read from the YAML file. However, users can set the
+parameter ``init_abundance_metal`` to ``-1`` to make SWIFT ignore the
+values provided in the parameter file:
+
+.. code:: YAML
+
+   EAGLEChemistry:
+     init_abundance_metal:       -1     # Read the particles' metal mass fractions from the ICs.
+
+
+The ICs must then contain values for these three fields (same as what
+is written to the snapshots):
+
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| Name                             | Description                         | Units     | Comments                    |
++==================================+=====================================+===========+=============================+
+| ``ElementAbundance``             | | Fraction of the gas/star mass     | [-]       | | Array of length           |
+|                                  | | in the different elements         |           | | 9 for each particle       |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``Metallicity``                  | | Fraction of the gas/star mass     | [-]       |                             |
+|                                  | | in *all* metals                   |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+| ``IronMassFracFromSNIa``         | | Fraction of the *total* gas/star  | [-]       |                             |
+|                                  | | mass in *iron* produced           |           |                             |
+|                                  | | by enrichment from SNIa stars     |           |                             |
++----------------------------------+-------------------------------------+-----------+-----------------------------+
+
+If these fields are absent, then a value of ``0`` will be used for all
+of them, likely leading to issues in the way the code will run.
+
+.. _EAGLE_cooling:
+     
+Gas cooling: Wiersma+2009a
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The gas cooling is based on the redshift-dependent tables of `Wiersma et
+al. (2009)a <http://adsabs.harvard.edu/abs/2009MNRAS.393...99W>`_ that include
+element-by-element cooling rates for the 11 elements (`H`, `He`, `C`, `N`, `O`,
+`Ne`, `Mg`, `Si`, `S`, `Ca` and `Fe`) that dominate the total rates. The tables
+assume that the gas is in ionization equilibrium with the cosmic microwave
+background (CMB) as well as with the evolving X-ray and UV background from
+galaxies and quasars described by the model of `Haardt & Madau (2001)
+<http://adsabs.harvard.edu/abs/2001cghr.confE..64H>`_. Note that this model
+ignores *local* sources of ionization, self-shielding and non-equilibrium
+cooling/heating. The tables can be obtained from this `link
+<http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/EAGLE/coolingtables.tar.gz>`_
+which is a re-packaged version of the `original tables
+<http://www.strw.leidenuniv.nl/WSS08/>`_. The code reading and interpolating the
+table is located in the directory ``src/cooling/EAGLE/``.
+
+The Wiersma tables containing the cooling rates as a function of redshift,
+Hydrogen number density, Helium fraction (:math:`X_{He} / (X_{He} + X_{H})`) and
+element abundance relative to the solar abundance pattern assumed by the tables
+(see equation 4 in the original paper). As the particles do not carry the mass
+fraction of `S` and `Ca`, we compute the contribution to the cooling rate of
+these elements from the abundance of `Si`. More specifically, we assume that
+their abundance by mass relative to the table's solar abundance pattern is the
+same as the relative abundance of `Si` (i.e. :math:`[Ca/Si] = 0` and
+:math:`[S/Si] = 0`). Users can optionally modify the ratios used for `S` and
+`Ca`.
+
+Above the redshift of Hydrogen re-ionization we use the extra table containing
+net cooling rates for gas exposed to the CMB and a UV + X-ray background at
+redshift nine truncated above 1 Rydberg. At the redshift of re-ionization, we
+additionally inject a fixed user-defined amount of energy per unit mass to all
+the gas particles.
+
+In addition to the tables we inject extra energy from Helium re-ionization using
+a Gaussian model with a user-defined redshift for the centre, width and total
+amount of energy injected per unit mass.
+
+For non-cosmological runs, we use the :math:`z = 0` table and the interpolation
+along the redshift dimension then becomes a trivial operation.
+
+The cooling itself is performed using an implicit scheme (see the theory
+documents) which for small values of the cooling rates is solved explicitly. For
+larger values we use a bisection scheme. Users can alternatively use a
+Newton-Raphson method that in some cases runs faster than the bisection
+method. If the Newton-Raphson method does not converge after a few steps, the
+code reverts to a bisection scheme, that is guaranteed to converge. The cooling
+rate is added to the calculated change in energy over time from the other
+dynamical equations. This is different from other commonly used codes in the
+literature where the cooling is done instantaneously.
+
+We note that the EAGLE cooling model does not impose any restriction on the
+particles' individual time-steps. The cooling takes place over the time span
+given by the other conditions (e.g the Courant condition).
+
+Finally, the cooling module also provides a function to compute the temperature
+of a given gas particle based on its density, internal energy, abundances and
+the current redshift. This temperature is the one used to compute the cooling
+rate from the tables and similarly to the cooling rates, they assume that the
+gas is in collisional equilibrium with the background radiation. The
+temperatures are, in particular, computed every time a snapshot is written and
+they are listed for every gas particle:
+
++---------------------+-------------------------------------+-----------+-------------------------------------+
+| Name                | Description                         | Units     | Comments                            |
++=====================+=====================================+===========+=====================================+
+| ``Temperature``     | | Temperature of the gas as         | [U_T]     | | The calculation is performed      |
+|                     | | computed from the tables.         |           | | using quantities at the last      |
+|                     |                                     |           | | time-step the particle was active |
++---------------------+-------------------------------------+-----------+-------------------------------------+
+
+Note that if one is running without cooling switched on at runtime, the
+temperatures can be computed by passing the ``--temperature`` runtime flag (see
+:ref:`cmdline-options`). Note that the tables then have to be available as in
+the case with cooling switched on.
+
+The cooling model is driven by a small number of parameters in the
+`EAGLECooling` section of the YAML file. These are the re-ionization parameters,
+the path to the tables and optionally the modified abundances of `Ca` and `S` as
+well as the flag to attempt using the Newton-Raphson scheme to solve the
+implicit problem. A valid section of the YAML file looks like:
+
+.. code:: YAML
+
+   EAGLECooling:
+     dir_name:     /path/to/the/Wiersma/tables/directory # Absolute or relative path
+     H_reion_z:            11.5      # Redshift of Hydrogen re-ionization
+     He_reion_z_centre:     3.5      # Centre of the Gaussian used for Helium re-ionization
+     He_reion_z_sigma:      0.5      # Width of the Gaussian used for Helium re-ionization
+     He_reion_ev_p_H:       2.0      # Energy injected in eV per Hydrogen atom for Helium re-ionization.
+
+And the optional parameters are:
+
+.. code:: YAML
+
+   EAGLECooling:
+     Ca_over_Si_in_solar:       1.0 # (Optional) Value of the Calcium mass abundance ratio to solar in units of the Silicon ratio to solar. Default value: 1.
+     S_over_Si_in_solar:        1.0 # (Optional) Value of the Sulphur mass abundance ratio to solar in units of the Silicon ratio to solar. Default value: 1.
+     newton_integration:        0   # (Optional) Set to 1 to use the Newton-Raphson scheme for the explicit cooling problem.
+
+.. _EAGLE_tracers:
+     
+Particle tracers
+~~~~~~~~~~~~~~~~
+
+Over the course of the simulation, the gas particles record some information
+about their evolution. These are updated for a given particle every time it is
+active. The EAGLE tracers module is located in the directory
+``src/tracers/EAGLE/``. 
+
+In the EAGLE model, we trace the maximal temperature a particle has reached and
+the time at which this happened. When a star is formed (see the section
+:ref:`EAGLE_star_formation` below), it inherits all the tracer values of its parent
+gas particle.  There are no parameters to the model but two values are added to
+the snapshots for each gas and star particle:
+
++----------------------------------------+---------------------------------------+-----------+-----------------------------+
+| Name                                   | Description                           | Units     | Comments                    |
++========================================+=======================================+===========+=============================+
+| | ``Maximal Temperature``              | | Maximal temperature reached by      | | [U_T]   |                             |
+|                                        | | this particle.                      |           |                             |
++----------------------------------------+---------------------------------------+-----------+-----------------------------+
+| | ``Maximal Temperature scale-factor`` | | Scale-factor (cosmological runs)    | | [-]     |                             |
+| | OR                                   | | or time (non-cosmological runs) at  | | OR      |                             |
+| | ``Maximal Temperature time``         | | which the maximum value was reached.| | [U_t]   |                             |
++----------------------------------------+---------------------------------------+-----------+-----------------------------+
+
+
+.. _EAGLE_star_formation:
+
+Star formation: Schaye+2008
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _EAGLE_enrichment:
+
+Stellar enrichment: Wiersma+2009b
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _EAGLE_feedback:
+
+Supernova feedback: Dalla Vecchia+2012
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. _EAGLE_black_hole_seeding:
+
+Black-hole creation
+~~~~~~~~~~~~~~~~~~~
+
+.. _EAGLE_black_hole_accretion:
+
+Black-hole accretion
+~~~~~~~~~~~~~~~~~~~~
+
+.. _EAGLE_black_hole_feedback:
+
+AGN feedback
+~~~~~~~~~~~~
+
+.. [#f1] Recall that in a non-cosmological run the critical density is
+	 set to 0, effectively removing the over-density
+	 constraint of the floors.
+
+.. [#f2] `Wiersma et al. (2009)b
+	 <http://adsabs.harvard.edu/abs/2009MNRAS.399..574W>`_ originally also
+	 followed explicitly `Ca` and `S`. They are omitted in the EAGLE
+	 model but, when needed, their abundance with respect to solar is
+	 assumed to be the same as the abundance of `Si` with respect to solar
+	 (See the section :ref:`EAGLE_cooling`)
+
+
diff --git a/doc/RTD/source/SubgridModels/EAGLE/plot_EAGLE_entropy_floor.py b/doc/RTD/source/SubgridModels/EAGLE/plot_EAGLE_entropy_floor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b02dd657fdb32e411961f04d0d758119344b809
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/EAGLE/plot_EAGLE_entropy_floor.py
@@ -0,0 +1,60 @@
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+from scipy import stats
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 9,
+    "legend.fontsize": 9,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (3.15, 3.15),
+    "figure.subplot.left": 0.15,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.13,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.15,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 2.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+# Equations of state
+eos_cool_rho = np.logspace(-5, 5, 1000)
+eos_cool_T = eos_cool_rho**0. * 8000.
+eos_Jeans_rho = np.logspace(-1, 5, 1000)
+eos_Jeans_T = (eos_Jeans_rho/ 10**(-1))**(1./3.) * 4000.
+
+# Plot the phase space diagram
+figure()
+subplot(111, xscale="log", yscale="log")
+plot(eos_cool_rho, eos_cool_T, 'k-', lw=1.)
+plot(eos_Jeans_rho, eos_Jeans_T, 'k-', lw=1.)
+plot([1e-10, 1e-5], [8000, 8000], 'k:', lw=0.6)
+plot([1e-10, 1e-1], [4000, 4000], 'k:', lw=0.6)
+plot([1e-5, 1e-5], [20, 8000], 'k:', lw=0.6)
+plot([1e-1, 1e-1], [20, 4000], 'k:', lw=0.6)
+plot([3e-6, 3e-4], [28000, 28000], 'k--', lw=0.6)
+text(3e-6, 22500, "$n_{\\rm H}~\\widehat{}~{\\tt Cool\\_gamma\\_effective}$", va="top", fontsize=7)
+plot([3e-1, 3e1], [15000., 15000.*10.**(2./3.)], 'k--', lw=0.6)
+text(3e-1, 200000, "$n_{\\rm H}~\\widehat{}~{\\tt Jeans\\_gamma\\_effective}$", va="top", rotation=43, fontsize=7)
+text(0.95e-5, 25, "${\\tt Cool\\_density\\_threshold\\_H\\_p\\_cm3}$", rotation=90, va="bottom", ha="right", fontsize=7)
+text(0.95e-1, 25, "${\\tt Jeans\\_density\\_threshold\\_H\\_p\\_cm3}$", rotation=90, va="bottom", ha="right", fontsize=7)
+text(5e-8, 8800, "${\\tt Cool\\_temperature\\_norm\\_K}$", va="bottom", fontsize=7)
+text(5e-8, 4400, "${\\tt Jeans\\_temperature\\_norm\\_K}$", va="bottom", fontsize=7)
+fill_between([1e-5, 1e5], [10, 10], [8000, 8000], color='0.9')
+fill_between([1e-1, 1e5], [4000, 400000], color='0.9')
+scatter([1e-5], [8000], s=4, color='k')
+scatter([1e-1], [4000], s=4, color='k')
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=2)
+xlim(3e-8, 3e3)
+ylim(20., 2e5)
+savefig("EAGLE_entropy_floor.png", dpi=200)
diff --git a/doc/RTD/source/SubgridModels/GEAR/index.rst b/doc/RTD/source/SubgridModels/GEAR/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2a211759bfc4895fd07279b72f78200d6ea47546
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/GEAR/index.rst
@@ -0,0 +1,37 @@
+.. GEAR sub-grid model
+   Matthieu Schaller, 20th December 2018
+
+
+GEAR model
+===========
+
+
+Cooling: Grackle
+~~~~~~~~~~~~~~~~
+   
+Grackle is a chemistry and cooling library presented in `B. Smith et al. 2016 <https://arxiv.org/abs/1610.09591>`_ 
+(do not forget to cite if used).  Four different modes are available:
+equilibrium, 6 species network (H, H\\( ^+ \\), e\\( ^- \\), He, He\\( ^+ \\)
+and He\\( ^{++} \\)), 9 species network (adds H\\(^-\\), H\\(_2\\) and
+H\\(_2^+\\)) and 12 species (adds D, D\\(^+\\) and HD).  Following the same
+order, the swift cooling options are ``grackle``, ``grackle1``, ``grackle2``
+and ``grackle3`` (the numbers correspond to the value of
+``primordial_chemistry`` in Grackle).  It also includes some self-shielding
+methods and UV background.  In order to use the Grackle cooling, you will need
+to provide an HDF5 table computed by Cloudy.
+
+When starting a simulation without providing the different fractions, the code
+supposes an equilibrium and computes the fractions automatically.
+
+In order to compile SWIFT with Grackle, you need to provide the options ``with-grackle``
+and ``with-chemistry``.
+
+You will need a Grackle version later than 3.1. To compile it, run
+the following commands from the root directory of Grackle:
+``./configure; cd src/clib``.
+Update the variables ``LOCAL_HDF5_INSTALL`` and ``MACH_INSTALL_PREFIX`` in
+the file ``src/clib/Make.mach.linux-gnu``.
+Finish with ``make machine-linux-gnu; make && make install``.
+If you encounter any problem, you can look at the `Grackle documentation <https://grackle.readthedocs.io/en/latest/>`_
+
+You can now provide the path given for ``MACH_INSTALL_PREFIX`` to ``with-grackle``.
diff --git a/doc/RTD/source/SubgridModels/index.rst b/doc/RTD/source/SubgridModels/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c1b8bc858c527f0ec6834e8de163306dd1be66cc
--- /dev/null
+++ b/doc/RTD/source/SubgridModels/index.rst
@@ -0,0 +1,18 @@
+.. Subgrid Models
+   Matthieu Schaller, 20th December 2018
+
+.. _subgrid:
+   
+Galaxy Formation Subgrid Models
+===============================
+
+Multiple models are available in SWIFT. The 'Basic' model can
+be used as an empty canvas to be copied to create additional models.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Available models:
+	      
+   Basic/index	      
+   EAGLE/index
+   GEAR/index
diff --git a/doc/RTD/source/Task/adding_your_own.rst b/doc/RTD/source/Task/adding_your_own.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6f6b37899b505a5bbf6a09d8757232e0b547a081
--- /dev/null
+++ b/doc/RTD/source/Task/adding_your_own.rst
@@ -0,0 +1,262 @@
+.. Task
+   Loic Hausammann 17th July 2018
+
+.. _task_adding_your_own:
+.. highlight:: c
+
+Adding a Task
+=============
+
+First you will need to understand the dependencies between tasks
+using the file ``dependency_graph.dot`` generated by swift at the beginning of any simulation and then decide where it will fit (see :ref:`task`).
+
+For the next paragraphs, let's assume that we want to implement the existing task ``cooling``.
+
+Adding it to the Task List
+--------------------------
+First you will need to add it to the task list situated in ``task.h`` and ``task.c``.
+
+In ``task.h``, you need to provide an additional entry to the enum ``task_types`` (e.g. ``task_type_cooling``).
+The last entry ``task_type_count`` should always stay at the end as it is a counter of the number of elements.
+For example::
+
+    enum task_types {
+      task_type_none = 0,
+      task_type_sort,
+      task_type_self,
+      task_type_pair,
+      task_type_sub_self,
+      task_type_sub_pair,
+      task_type_ghost_in,
+      task_type_ghost,
+      task_type_ghost_out,
+      task_type_extra_ghost,
+      task_type_drift_part,
+      task_type_end_force,
+      task_type_kick1,
+      task_type_kick2,
+      task_type_timestep,
+      task_type_send,
+      task_type_recv,
+      task_type_cooling,
+      task_type_count
+    } __attribute__((packed));
+
+
+In ``task.c``, you will find an array containing the name of each task and need to add your own (e.g. ``cooling``).
+Be careful with the order, which should be the same as in the previous list.
+For example::
+
+  /* Task type names. */
+  const char *taskID_names[task_type_count] = {
+    "none",          "sort",       "self",        "pair",      "sub_self",
+    "sub_pair",      "ghost_in",   "ghost",     "ghost_out",
+    "extra_ghost",   "drift_part", "end_force", "kick1",
+    "kick2",         "timestep",   "send",        "recv",
+    "cooling"};
+
+
+Adding it to the Cells
+----------------------
+
+Each cell contains a list to its tasks and therefore you need to provide a link for it.
+
+In ``cell.h``, add a pointer to a task in the structure.
+In order to stay clean, please put the new task in the same group as the other tasks.
+For example::
+
+  struct cell {
+    /* Lot of stuff before. */
+    
+    /*! Task for the cooling */
+    struct task *cooling;
+
+    /*! The second kick task */
+    struct task *kick2;
+    
+    /* Lot of stuff after */
+  }
+
+
+Adding a new Timer
+------------------
+
+As SWIFT is HPC oriented, any new task needs to be optimized.
+It cannot be done without timing the function.
+
+In ``timers.h``, you will find an enum that contains all the tasks.
+You will need to add yours inside it.
+For example::
+
+  enum {
+    timer_none = 0,
+    timer_prepare,
+    timer_init,
+    timer_drift_part,
+    timer_drift_gpart,
+    timer_kick1,
+    timer_kick2,
+    timer_timestep,
+    timer_endforce,
+    timer_dosort,
+    timer_doself_density,
+    timer_doself_gradient,
+    timer_doself_force,
+    timer_dopair_density,
+    timer_dopair_gradient,
+    timer_dopair_force,
+    timer_dosub_self_density,
+    timer_dosub_self_gradient,
+    timer_dosub_self_force,
+    timer_dosub_pair_density,
+    timer_dosub_pair_gradient,
+    timer_dosub_pair_force,
+    timer_doself_subset,
+    timer_dopair_subset,
+    timer_dopair_subset_naive,
+    timer_dosub_subset,
+    timer_do_ghost,
+    timer_do_extra_ghost,
+    timer_dorecv_part,
+    timer_do_cooling,
+    timer_gettask,
+    timer_qget,
+    timer_qsteal,
+    timer_locktree,
+    timer_runners,
+    timer_step,
+    timer_cooling,
+    timer_count,
+  };
+
+As for ``task.h``,
+you will need to give a name to your timer in ``timers.c``::
+
+  const char* timers_names[timer_count] = {
+    "none",
+    "prepare",
+    "init",
+    "drift_part",
+    "kick1",
+    "kick2",
+    "timestep",
+    "endforce",
+    "dosort",
+    "doself_density",
+    "doself_gradient",
+    "doself_force",
+    "dopair_density",
+    "dopair_gradient",
+    "dopair_force",
+    "dosub_self_density",
+    "dosub_self_gradient",
+    "dosub_self_force",
+    "dosub_pair_density",
+    "dosub_pair_gradient",
+    "dosub_pair_force",
+    "doself_subset",
+    "dopair_subset",
+    "dopair_subset_naive",
+    "dosub_subset",
+    "do_ghost",
+    "do_extra_ghost",
+    "dorecv_part",
+    "gettask",
+    "qget",
+    "qsteal",
+    "locktree",
+    "runners",
+    "step",
+    "cooling",
+  };
+
+
+You can now easily time
+your functions by using::
+
+  TIMER_TIC;
+  /* Your complicated functions */
+  if (timer) TIMER_TOC(timer_cooling);
+
+
+Adding your Task to the System
+------------------------------
+
+Now the tricky part happens.
+SWIFT is able to deal automatically with the conflicts between tasks, but unfortunately cannot understand the dependencies.
+
+To implement your new task in the task system, you will need to modify a few functions in ``engine.c``.
+
+First, you will need to add mainly two functions: ``scheduler_addtask`` and ``scheduler_addunlocks`` in the ``engine_make_hierarchical_tasks_*`` functions (depending on the type of task you implement, you will need to write it to a different function).
+
+In ``engine_make_hierarchical_tasks_hydro``,
+we add the task through the following call::
+
+  /* Add the cooling task. */
+  c->cooling =
+  scheduler_addtask(s, task_type_cooling, task_subtype_none, 0,
+                    0, c, NULL);
+
+As the ``cooling`` cannot be done before the end of the force computation
+and the second kick cannot be done before the cooling::
+
+  scheduler_addunlock(s, c->super->end_force, c->cooling);
+  scheduler_addunlock(s, c->cooling, c->super->kick2);
+
+
+The next step is to activate your task
+in ``engine_marktasks_mapper``::
+
+  else if (t->type == task_type_cooling || t->type == task_type_sourceterms) {
+    if (cell_is_active_hydro(t->ci, e)) scheduler_activate(s, t);
+  }
+
+Then you will need to update the estimate for the number of tasks in ``engine_estimate_nr_tasks`` by modifying ``n1`` or ``n2``.
+
+Initially, the engine will need to skip the task that updates the particles.
+It is the case for the cooling, therefore you will need to add it in ``engine_skip_force_and_kick``.
+
+Implementing your Task
+----------------------
+
+The last part is situated in ``runner.c``.
+
+You will need to implement a function ``runner_do_cooling``
+(do not forget to time it)::
+
+  void runner_do_cooling(struct runner *r, struct cell *c, int timer) {
+
+    TIMER_TIC;
+
+    /* Now you can check if something is required at this time step.
+     * You may want to use a different cell_is_active function depending
+     * on your task
+     */
+    if (!cell_is_active_hydro(c, e)) return;
+
+    /* Recurse? */
+    if (c->split) {
+      for (int k = 0; k < 8; k++)
+        if (c->progeny[k] != NULL) runner_do_cooling(r, c->progeny[k], 0);
+    } else {
+      /* Implement your cooling here */
+    }
+
+    if (timer) TIMER_TOC(timer_do_cooling);
+  }
+
+
+
+and add a call to this function in ``runner_main``
+in the switch::
+
+  case task_type_cooling:
+    runner_do_cooling(r, t->ci, 1);
+    break;
+
+
+Finalizing your Task
+--------------------
+
+Now that you have done the easiest part, you can start debugging by implementing a test and/or an example.
+Before creating your merge request with your new task, do not forget the funniest part, which consists of writing nice and beautiful documentation ;)
diff --git a/doc/RTD/source/Task/index.rst b/doc/RTD/source/Task/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..82210895618618f87faaada472c72afe321b1d04
--- /dev/null
+++ b/doc/RTD/source/Task/index.rst
@@ -0,0 +1,23 @@
+.. Task
+   Loic Hausammann 17th July 2018
+
+.. _task:
+   
+Task System
+===========
+
+This section of the documentation includes information on the task system
+available in SWIFT, as well as how to implement your own task.
+
+SWIFT can produce a graph containing all the dependencies using graphviz.
+At the beginning of each simulation a ``csv`` file is generated and can be transformed into a ``png`` with the script ``tools/plot_task_dependencies.py``.
+This script has also the possibility to generate a list of function calls for each task with the option ``--with-calls``.
+You can convert the ``dot`` file into a ``png`` with the following command
+``dot -Tpng dependency_graph.dot -o dependency_graph.png`` or directly read it with the python module ``xdot`` with ``python -m xdot dependency_graph.dot``.
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   adding_your_own
diff --git a/doc/RTD/source/VELOCIraptorInterface/index.rst b/doc/RTD/source/VELOCIraptorInterface/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..312b9cd3f893dd44f814ad80ac40748db12bd4d5
--- /dev/null
+++ b/doc/RTD/source/VELOCIraptorInterface/index.rst
@@ -0,0 +1,24 @@
+.. VELOCIraptor Interface
+   Folkert Nobels, 8th October 2018
+
+VELOCIraptor Interface
+======================
+
+This section includes information on the VELOCIraptor interface implemented in
+SWIFT. There are mainly four subsections; the first section explains shortly 
+how VELOCIraptor works, the second subsection explains how to configure SWIFT
+with VELOCIraptor, the third subsection explains how to configure a standalone
+version of VELOCIraptor and the last subsection explains how the output format
+of VELOCIraptor works.
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   whatis
+   stfwithswift
+   stfalone
+   output
+
+
+
diff --git a/doc/RTD/source/VELOCIraptorInterface/output.rst b/doc/RTD/source/VELOCIraptorInterface/output.rst
new file mode 100644
index 0000000000000000000000000000000000000000..946841fbece0207430846725b6a995cbc3f12613
--- /dev/null
+++ b/doc/RTD/source/VELOCIraptorInterface/output.rst
@@ -0,0 +1,355 @@
+.. VELOCIraptor output
+   Folkert Nobels 12th of October
+
+VELOCIraptor Output
+===================
+
+.. toctree::
+   :maxdepth: 2
+   :hidden:
+   :caption: Contents: 
+
+In general VELOCIraptor outputs six files per snapshot, of which 2 files are
+for unbound particles specifically.  In this part we will explain what is
+inside the different files.
+
+Catalog_groups file
+-------------------
+
+The first output file of VELOCIraptor is the ``.catalog_group`` file,
+this file contains all the information that is group specific, and does not go
+into depth of physical properties but only on numbers of particles and 
+group sizes, the interesting data in the ``.catalog_group`` files are: 
+
++ The ``group_size``: gives a list of all the halos and the number of particles
+  in the halo, this list is numbered from 0 until the number of groups minus
+  one. It is important to note that the groups are not ordered in any way [#order]_.
+  It is also important to note that the group size includes both the bound and
+  unbound particles; always use the ``Offset`` and ``Offset_unbound`` data
+  when reading from the ``catalog_particles`` files.
++ The ``Num_of_groups`` or ``Total_num_of_groups``: gives the total number of
+  groups in the snapshot.
++ The ``Offset`` list: This list gives the offset of the particles. In the
+  output of VELOCIraptor there is no file which has an ID for every particle
+  and a corresponding group, rather the particles are ordered according to in
+  which group they are. So if we want to access the particles in group 0, we
+  need to look at the particles from ``Offset[0]`` until ``Offset[1]`` in the
+  ``.catalog_particles`` hdf5 file. In general this means that for group N we
+  need to look at particles ``Offset[N]`` until ``Offset[N+1]``. 
++ The ``Offset_unbound`` list: This list works exactly the same as the
+  ``Offset`` list only this list is for the gravitational unbound particles.
+
+Catalog_particles file
+----------------------
+
+The second file that is produced by VELOCIraptor is the ``.catalog_particles``
+file, this file contains mainly all the IDs of the particles and has two
+interesting parameters:
+
++ The ``Num_of_particles_in_groups`` and ``Total_num_of_particles_in_all_groups``
+  parameter: Gives the total number of particles in the file or the total 
+  number of particles that are in halos.
++ The ``Particle_IDs``: The list of particles as sorted by halo, in which halo
+  the individual particles are present can be found by using the
+  ``.catalog_group`` file and the corresponding ``Offset`` list. 
+
+Besides the ``.catalog_particles`` file, there is also a
+``.catalog_particles.unbound`` file, this file contains the same information
+but only for the unbound particles, a particle can only be present in one of
+these two lists. 
+
+Extracting the particles in a given halo
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``.catalog_particles`` file returns particle IDs that need to be matched
+with those in your snapshot to find the particles in the file that you
+wish to extract. The python snippet below should give you an idea of how to
+go about doing this for the bound particles.
+
+First, we need to extract the offset from the ``.catalog_group`` file, and
+work out how many _bound_ particles are in our halo. We can do this by
+looking at the next offset. Then, we can ID match those with the snapshot
+file, and get the mask for the _positions_ in the file that correspond
+to our bound particles. (Note this requires ``numpy > 1.15.0``).
+
+.. code-block:: python
+   :linenos:
+
+   import numpy as np
+   import h5py
+
+   snapshot_file = h5py.File("swift_snapshot.hdf5", "r")
+   group_file = h5py.File("velociraptor_output.catalog_group", "r")
+   particles_file = h5py.File("velociraptor_output.catalog_particles", "r")
+
+   halo = 100
+   # Grab the start position in the particles file to read from
+   halo_start_position = group_file["Offset"][halo]
+   halo_end_position = group_file["Offset"][halo + 1]
+   # We're done with that file now, best to close earlier rather than later
+   group_file.close()
+
+   # Get the relevant particle IDs for that halo; this includes particles
+   # of _all_ types.
+   particle_ids_in_halo = particles_file["Particle_IDs"][
+       halo_start_position:halo_end_position
+   ]
+   # Again, we're done with that file.
+   particles_file.close()
+
+   # Now, the tricky bit. We need to create the correspondence between the
+   # positions in the snapshot file, and the ids.
+
+   # Let's look for the dark matter particles in that halo.
+   particle_ids_from_snapshot = snapshot_file["PartType1/ParticleIDs"][...]
+
+   _, indices_v, indices_p = np.intersect1d(
+       particle_ids_in_halo,
+       particle_ids_from_snapshot,
+       assume_unique=True,
+       return_indices=True,
+   )
+
+   # indices_p gives the positions in the particle file where we will find
+   # the co-ordinates that we're looking for! To get the positions of all of
+   # those particles,
+   particle_positions_in_halo = snapshot_file["PartType1/Coordinates"][indices_p]
+
+
+Catalog_parttypes file
+----------------------
+
+The third file that is produced by VELOCIraptor is the ``.catalog_parttypes``
+file, this file contains the information what type of particle every particle
+is, it is ordered the same as the ``Particle_IDs`` in ``.catalog_particles``. 
+There are only two interesting parameters of the file which are:
+
++ The ``Num_of_particles_in_groups`` parameter: Gives the total number of
+  particles in the file which are in a halo.
++ The ``Particle_types`` list: Gives a list of particles types similar to the
+  snap shots (0 - gas, 1 - dm, 4 - stars).
+
+Besides the ``.catalog_parttypes`` file, there is also a
+``.catalog_parttypes.unbound`` file, this file contains this information for
+the unbound particles.
+
+Properties file
+---------------
+
+The fourth file is the ``.properties`` file, this file contains many physical
+useful information of the corresponding halos. This can be divided in several
+useful groups of physical parameters, on this page we have divided the several
+variables which are present in the ``.properties`` file. This file has most 
+physical interesting parameters of the halos.
+
+Mass-Radius determination:
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``.properties`` file contains many ways to determine the size and mass 
+of the halos, in this subsection we will list several available variables in
+the output of VELOCIraptor and we list several mass and radius parameters in
+the output which are not classified as a mass-radius pair.
+
+Critical Density related:
+"""""""""""""""""""""""""
+
++ ``Mass_200crit``: The mass of a halo with an over density on average of
+  :math:`\Delta=200` based on the critical density of the Universe 
+  (:math:`M_{200}`).
++ ``R_200crit``: The :math:`R_{200}` radius of the halo based on the 
+  critical density of the Universe
+
+Mean Density related:
+"""""""""""""""""""""
+
++ ``Mass_200mean``: The mass of a halo with an over density on average of
+  :math:`\Delta=200` based on the mean density of the Universe 
+  (:math:`M_{200}`).
++ ``R_200mean``: The :math:`R_{200}` radius of the halo based on the 
+  mean density of the Universe.
+
+Virial properties:
+""""""""""""""""""
+
++ ``Mvir``: The virial mass of the halos.
++ ``Rvir``: The virial radius of the halo (:math:`R_{vir}`).
+
+Bryan and Norman 1998 properties:
+"""""""""""""""""""""""""""""""""
+
++ ``Mass_BN98``, The Bryan and Norman (1998) determination of the mass of the
+  halo [#BN98]_. 
++ ``R_BN98``, the Bryan and Norman (1998) corresponding radius [#BN98]_.
+
+Several Mass types:
+"""""""""""""""""""
+This is a list of masses which cannot be categorized as easily as the other 
+properties.
+
++ ``Mass_FOF``: The friends-of-friends mass of the halos.
++ ``M_gas``: The gas mass in the halo.
++ ``Mass_tot``: The total mass of the halo
++ ``M_gas_30kpc``: The gas mass within 30 kpc of the halo centre.
++ ``M_gas_500c``: The gas mass of the over-density of 500 times the critical
+  density
++ ``M_gas_Rvmax``: The gas mass within the maximum rotation velocity.
+
+Several Radius types:
+"""""""""""""""""""""
+
++ ``R_HalfMass``: Radius of half the mass of the halo.
++ ``R_HalfMass_gas``: Radius of half the gas mass of the halo.
++ ``R_size``:
++ ``Rmax``: 
+
+Mass Structure of the Halos:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this subsection we listed the properties of the halos that are determining 
+the mass structure of the halo, so the exact profile and the inertia tensor.
+
+NFW profile properties:
+"""""""""""""""""""""""
++ ``Xc``, ``Yc`` and ``Zc``: The x,y and z centre positions of the
+  halos.
+  
+  Centres are calculated using first all particles belonging to the
+  structure and then VELOCIraptor uses shrinking spheres to iterate to
+  a centre, stopping once the sphere contains <10% of all the
+  particles (this value can be changed to smaller amounts and there is
+  also a minimum particle number which can also be changed).
+  
++ ``Xc_gas``, ``Yc_gas``, ``Zc_gas``: The offset of the centre
+  positions of the halo based on the gas, to find the position of the
+  gas the offsets need to be added to ``Xc``, ``Yc`` and ``Zc``.
+
++ ``cNFW``: The concentration of the halo.
+
+  This is calculated using Vmax and Vvir, not using a fitted profile.
+  
++ ``VXc``, ``VYc`` and ``VZc`` are the velocities in the centre of the halo
+  [#check]_.
++ ``VXc_gas``, ``VYc_gas`` and ``VZc_gas`` are the velocities of the gas  in
+  the centre of the halo [#check]_.
+
+Inertia Tensor properties:
+"""""""""""""""""""""""""""
+
++ ``eig_ij``: Are the normalized eigenvectors of the inertia tensor.
++ The eigenvalue ratios: 
+
+  1. ``q`` is the semi-major over major; 
+  2. ``s`` is the minor over major.
+
++ ``eig_ij_gas``: Are the normalized eigenvectors of the inertia tensor for
+  only the gas particles.
++ The eigenvalue ratios for only the gas, similar to all particles:
+
+  1. ``q_gas`` is the semi-major over major for only gas; 
+  2. ``s_gas`` is the minor over major for only gas.
+
+Dynamical Structure of the Halos:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this subsection we list several properties that determine the dynamical
+structure of the halo, like the angular momentum and the velocity dispersion
+tensor.
+
+Angular momentum and spin parameters:
+"""""""""""""""""""""""""""""""""""""
+
++ ``lambda_b`` is the bullock spin parameter, see the paper by Bullock et al.
+  (2001) [#Bullock]_. 
++ ``Lx``, ``Ly`` and ``Lz`` are the angular momentum of the halos, the 
+  calculation includes all the particle types.
++ ``Lx_gas``, ``Ly_gas`` and ``Lz_gas`` are the angular momentum for only 
+  the gas particles in the snapshot.
+
+Velocity Dispersion related:
+""""""""""""""""""""""""""""
+
++ The complete velocity dispersion tensor (:math:`\sigma_{ij}`) which has 
+  an array per component which gives the value for all the halos. In 
+  general these components are called ``veldisp_ij`` in which i and j are 
+  given by ``x``, ``y`` or ``z``. This means that there are nine 
+  components stored in the ``.properties`` file. This omits the fact 
+  that the dispersion tensor by nature is a symmetric tensor. All the 
+  components are given by: 
+  ``veldisp_xx``, ``veldisp_xy``, ``veldisp_xz``, ``veldisp_yx``, 
+  ``veldisp_yy``, ``veldisp_yz``, ``veldisp_zx``, ``veldisp_zy``, 
+  and ``veldisp_zz`` [#velodisp]_.
++ ``sigV``, the scalar velocity dispersion which corresponds with the 
+  trace of the velocity dispersion tensor 
+  (:math:`\sigma = \text{Tr}(\sigma_{ij})`).
+
+
+Energy properties of the halos:
+"""""""""""""""""""""""""""""""
+
++ ``Ekin``, the kinetic energy of the halo.
++ ``Epot``, the potential energy of the halo.
++ ``Krot``, the rotational energy of the halo.
++ ``Krot_gas``, the rotational energy of the gas in the halo.
+
+
+Halo and subhalo abstract variables:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this subsection we list the ID convention for subhalos and halos and 
+some other abstract quantities of the halo which are not physical but 
+rather properties of the simulations.
+
+Structure types:
+""""""""""""""""
+
++ ``ID`` is the halo ID.
++ ``Structuretype`` is the parameter that indicates what kind of structure 
+  the current halo is. Halos have a structure type of ``10`` and subhalos
+  have a structure type of ``15``.
++ ``hostHaloID``, indicates the halo ID number of the host halo, in the case
+  that the halo has no parent (e.g. is the largest halo), the hostHaloID will
+  be ``-1``.
++ ``numSubStruct``, the number of substructures or subhalos in the halo.
+
+Particle types:
+"""""""""""""""
+
++ ``npart`` is the number of particles in the halo (all types of particles).
++ ``n_gas`` is the number of gas particles in the halo.
+
+Not specified parameters:
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In this section we list parameters which cannot specifically be classified 
+in a group.
+
+
+Most Bound Particle (MBP):
+""""""""""""""""""""""""""
+
++ ``ID_mbp``, the ID of the most bound particle in the halo.
++ ``Xcmbp``, ``Ycmbp`` and ``Zcmbp`` are the positions of the most bound 
+  halo particle [#check]_.
++ ``VXcmbp``, ``VYcmbp`` and ``VZcmbp`` are the velocities of the most bound
+  halo particle [#check]_.
+
+.. [#order] In most cases more massive groups appear earlier in the list, but 
+   this is not guaranteed for larger simulations. The order of the groups is 
+   more a matter of the way that VELOCIraptor searches instead of a physical 
+   reason.
+.. [#center] This is not the average positions of the halos particles, but
+   the halo position found by the VELOCIraptor algorithm. This includes a 
+   fit for all the parameters including the gas particles or other types of
+   particles.
+.. [#velodisp] In the velocity dispersion tensor ( :math:`\sigma_{ij}` )  
+   the following relations are satisfied between components:
+
+   + :math:`\sigma_{xy}=\sigma_{yx}`
+   + :math:`\sigma_{xz}=\sigma_{zx}`
+   + :math:`\sigma_{yz}=\sigma_{zy}`
+.. [#Bullock] The Bullock spin parameter is given by 
+   :math:`\lambda = \frac{J}{\sqrt{2}MVR}`, for more information see 
+   https://arxiv.org/abs/astro-ph/0011001. 
+.. [#BN98] The Bryan and Norman (1998) paper can be found here: 
+   https://arxiv.org/abs/astro-ph/9710107
+.. [#check] Needs to be checked.
diff --git a/doc/RTD/source/VELOCIraptorInterface/stfalone.rst b/doc/RTD/source/VELOCIraptorInterface/stfalone.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e6bd0a72b207d7ca54bae67283326e7dcff51c02
--- /dev/null
+++ b/doc/RTD/source/VELOCIraptorInterface/stfalone.rst
@@ -0,0 +1,79 @@
+.. VELOCIraptor stand alone 
+   Folkert Nobels 12th October 2018
+
+Stand alone VELOCIraptor configuration
+======================================
+
+
+.. toctree::    
+   :maxdepth: 2    
+   :hidden:    
+   :caption: Contents: 
+   
+Besides running VELOCIraptor on the fly when using SWIFT, it is also possible
+to run VELOCIraptor alone without using SWIFT. In this section we explain how 
+VELOCIraptor can be run stand alone without using SWIFT.
+
+Setting up VELOCIraptor
+-----------------------
+
+The first step is setting up VELOCIraptor, this requires us to download the 
+git repository as::
+  
+  git clone https://github.com/pelahi/VELOCIraptor-STF
+
+Similar to the SWIFT with VELOCIraptor configuration, we can use the 
+master to analyse individual snapshots. We can use this branch
+by doing::
+
+  cd VELOCIraptor-STF
+  git fetch
+
+Again we need to configure VELOCIraptor::
+
+  cmake . -DVR_USE_GAS=ON
+
+In this case, we do not need the SWIFT interface, therefore we can drop
+this option (disabled by default).
+
+Compiling VELOCIraptor
+----------------------
+
+Compiling is completely different compared to the on-the-fly halo finder
+configuration with SWIFT. In this case we can compile the code as::
+
+  make 
+
+After this an executable is created (``VELOCIraptor-stf/stf``).
+
+Running VELOCIraptor on a Snapshot
+----------------------------------
+
+After the code is compiled, the next step is using VELOCIraptor on a single 
+snapshot of a simulation. The code has several options which can be used, which
+can be displayed by running the command with an invalid option, for example::
+
+  ./stf -h
+
+which gives the information about the usage of the command::
+
+  VELOCIraptor/STF running with MPI. Number of mpi threads: 1
+  VELOCIraptor/STF running with OpenMP. Number of openmp threads: 8
+  USAGE:
+
+  -C <configuration file (overrides other options)> 
+  -I <input format [Gadget (Default) 1, HDF (if implemented)2, TIPSY 3, RAMSES 4, HDF 2, NCHILADA 5>
+  -i <input file> 
+  -s <number of files per output for gadget input 1 [default]>
+  -Z <number of threads used in parallel read (1)>
+  -o <output filename>
+   ===== EXTRA OPTIONS FOR GADGET INPUT ====== 
+  -g <number of extra sph/gas blocks for gadget>
+  -s <number of extra star blocks for gadget>
+  -b <number of extra bh blocks for gadget>
+   ===== EXTRA OPTIONS REQUIRED FOR RAMSES INPUT ====== 
+  -t <ramses snapnumber>
+
+After this we can run VELOCIraptor on a snapshot as::
+  
+  ./stf -i input -o output -C configfile.txt
diff --git a/doc/RTD/source/VELOCIraptorInterface/stfwithswift.rst b/doc/RTD/source/VELOCIraptorInterface/stfwithswift.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ed261b76abbcefaf5643a69069bb4b8ea1a0894c
--- /dev/null
+++ b/doc/RTD/source/VELOCIraptorInterface/stfwithswift.rst
@@ -0,0 +1,88 @@
+.. SWIFT with VELOCIraptor
+   Folkert Nobels 12th October 2018
+
+
+Configuring SWIFT with VELOCIraptor
+===================================
+
+.. toctree::    
+   :maxdepth: 2    
+   :hidden:    
+   :caption: Contents:
+
+In the following three paragraphs we will explain how to setup VELOCIraptor,
+how to compile it and how to compile SWIFT with VELOCIraptor. 
+
+
+Setting up VELOCIraptor
+-----------------------
+
+Before we can run SWIFT with VELOCIraptor we first need to download
+VELOCIraptor. This can be done by cloning the repository on GitHub_::
+
+  git clone https://github.com/pelahi/VELOCIraptor-STF
+
+Currently the best version that works with SWIFT is the master
+of VELOCIraptor, to get this branch use::
+
+  cd VELOCIraptor-STF 
+  git fetch 
+
+To get VELOCIraptor working with SWIFT simply use::
+
+  cmake . -DVR_USE_SWIFT_INTERFACE=ON -DCMAKE_CXX_FLAGS="-fPIC" -DVR_USE_GAS=ON
+
+If you wish to run swift without MPI, you will need to add ``-DVR_MPI=OFF``.
+
+Compiling VELOCIraptor
+----------------------
+
+After we downloaded the files and made a configuration file we can compile
+VELOCIraptor as follows::
+
+  make -j 4
+
+After the compilation of your code, you will find a static library ``libvelociraptor.a``,
+that is required to run SWIFT with VELOCIraptor.
+Note that VELOCIraptor needs a serial version of the
+HDF5 library, not a parallel build.
+
+Compiling SWIFT
+---------------
+The next part is compiling SWIFT with VELOCIraptor and assumes you already
+downloaded SWIFT from the GitLab_, this can be done by running
+
+.. code:: bash
+  
+  ./autogen.sh 
+  ./configure --with-velociraptor=/path/to/VELOCIraptor-STF/src 
+  make 
+
+In which ``./autogen.sh`` only needs to be run once after the code is cloned
+from the GitLab_, and ``/path/to/`` is the path to the ``VELOCIraptor-STF``
+directory on your machine. In general ``./configure`` can be run with other
+options as desired. After this we can run SWIFT with VELOCIraptor, but for this
+we first need to add several lines to the yaml file of our simulation
+
+    
+.. code:: YAML
+
+   StructureFinding:      
+     config_file_name:     stf_input_6dfof_dmonly_sub.cfg
+     basename:             ./stf
+     scale_factor_first:   0.02
+     delta_time:           1.02
+
+In which we specify the ``.cfg`` file that is used by VELOCIraptor and the 
+other parameters which SWIFT needs to use. In the case of 
+the Small Cosmological Volume DMO example we can run a simulation with halo
+finder as::
+
+  cd examples/SmallCosmoVolume_DM 
+  ../swift --cosmology --hydro --self-gravity --velociraptor --threads=8 small_cosmo_volume_dm.yml
+
+Which activates the VELOCIraptor interface.
+
+
+.. _GitHub: https://github.com/pelahi/VELOCIraptor-STF
+.. _GitLab: https://gitlab.cosma.dur.ac.uk/swift/swiftsim
diff --git a/doc/RTD/source/VELOCIraptorInterface/whatis.rst b/doc/RTD/source/VELOCIraptorInterface/whatis.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a0a2a7441c52c188cc603910b43c112b3e24029e
--- /dev/null
+++ b/doc/RTD/source/VELOCIraptorInterface/whatis.rst
@@ -0,0 +1,65 @@
+.. What is VELOCIraptor
+   Folkert Nobels 12th October 2018
+
+
+What is VELOCIraptor?
+=====================
+
+.. toctree::    
+   :maxdepth: 2    
+   :hidden:    
+   :caption: Contents: 
+
+In SWIFT it is possible to run a cosmological simulation and at the same time
+do on the fly halo finding at specific predefined intervals. For finding the 
+halos SWIFT uses VELOCIraptor (Elahi, Thacker and Widrow; 2011) [#velo]_, this 
+is a C++ halo finder that can use MPI. It differs from other halo finder 
+algorithms in the sense that it uses the velocity distributions of the 
+particles in the simulations and the positions of the particles to get
+a better estimate of which particles are part of a specific halo and 
+whether there are substructures in halos. 
+
+The Algorithm
+-------------
+
+The VELOCIraptor algorithm consists basically of the following steps [#ref]_:
+
+1. A kd-tree is constructed based on the maximization of the Shannon-entropy,
+   this means that at every level in the kd-tree an equal number of particles 
+   are distributed between the 8 lower nodes. This is based on their position
+   and their corresponding density, this results in more equal density 
+   distributed nodes. This is also the implicit step in the algorithm that 
+   takes into account the absolute positions of the particles.
+2. The next part is calculating the centre of mass velocity and the 
+   velocity distribution for every individual node in the kd-tree. 
+3. Then the algorithm estimates the background velocity density function for
+   every particle based on the cell of the particle and the six nearest
+   neighbour cells. This prevents the background velocity density function 
+   to be over sensitive for variations between different cells due to dominant
+   halo features in the velocity density function. 
+4. After this the algorithm searches for the nearest velocity neighbours 
+   (:math:`N_v`) from a set of nearest position neighbours (:math:`N_x>N_v`).
+   The neighbours' positions do not need to be in the cell of the particles, in
+   general the set of nearest position neighbours is substantially larger than
+   the nearest velocity neighbours, the default is set as :math:`N_x=32 N_v`.
+5. The individual local velocity density function is calculated for every 
+   particle.
+6. The fractional difference is calculated between the local velocity density 
+   function and the background velocity density function.
+7. Based on the calculated ratio, outliers are picked and the outliers are  
+   grouped together in halos and subhalos.
+  
+
+
+.. Every halo finder has limitations, the limitations of VELOCIraptor are:
+
+.. 1. The algorithm is mostly sensitive to substructures that are on the tail
+   of the Gaussian velocity density function, this means that VELOCIraptor
+   is most sensitive for subhalos which are cold (slow rotating) but have 
+   a large bulk velocity
+
+
+.. _Velociraptor: http://adsabs.harvard.edu/abs/2011MNRAS.418..320E
+.. [#velo] For technical information regarding VELOCIraptor see: Velociraptor_
+.. [#ref] This part is based on the explanation given in the Elahi, Thacker and
+   Widrow (2011) paper (Velociraptor_)
diff --git a/doc/RTD/source/conf.py b/doc/RTD/source/conf.py
index 031687ea5228252e2d2e44ec0bd6f53b1b64d732..fac755bbb4ee9cd25bf3526bc435c69be3a9d5b5 100644
--- a/doc/RTD/source/conf.py
+++ b/doc/RTD/source/conf.py
@@ -18,14 +18,14 @@
 
 # -- Project information -----------------------------------------------------
 
-project = 'SWIFT: SPH WIth Fine-grained inter-dependent Tasking'
+project = 'SWIFT: SPH With Inter-dependent Fine-grained Tasking'
 copyright = '2018, SWIFT Collaboration'
 author = 'SWIFT Team'
 
 # The short X.Y version
-version = '0.7'
+version = '0.8'
 # The full version, including alpha/beta/rc tags
-release = '0.7.0'
+release = '0.8.0'
 
 
 # -- General configuration ---------------------------------------------------
@@ -87,7 +87,7 @@ html_theme = 'sphinx_rtd_theme'
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['.static']
+# html_static_path = ['.static']
 
 # Custom sidebar templates, must be a dictionary that maps document names
 # to template names.
diff --git a/doc/RTD/source/index.rst b/doc/RTD/source/index.rst
index 888945a5c0101bb6f59b574a30f1f736ad134079..83422b4e5caf05bacb3824d06426b9cdeba3921e 100644
--- a/doc/RTD/source/index.rst
+++ b/doc/RTD/source/index.rst
@@ -3,7 +3,7 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
-Welcome to SWIFT: SPH WIth Fine-grained inter-dependent Tasking's documentation!
+Welcome to SWIFT: SPH With Inter-dependent Fine-grained Tasking's documentation!
 ================================================================================
 
 Want to get started using SWIFT? Check out the on-boarding guide available
@@ -15,8 +15,14 @@ difference is the parameter file that will need to be adapted for SWIFT.
    :maxdepth: 2
 
    GettingStarted/index
+   CommandLineOptions/index
+   ParameterFiles/index
    InitialConditions/index
+   Snapshots/index
    HydroSchemes/index
-   Cooling/index
+   SubgridModels/index
    EquationOfState/index
+   ExternalPotentials/index
    NewOption/index
+   Task/index
+   VELOCIraptorInterface/index
diff --git a/examples/ConstantCosmoVolume/constant_volume.yml b/examples/ConstantCosmoVolume/constant_volume.yml
deleted file mode 100644
index ad31fd1972565b0d7683711a20db78e854c3dc5f..0000000000000000000000000000000000000000
--- a/examples/ConstantCosmoVolume/constant_volume.yml
+++ /dev/null
@@ -1,53 +0,0 @@
-# Define the system of units to use internally. 
-InternalUnitSystem:
-  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
-  UnitVelocity_in_cgs: 1e5   # km/s in centimeters per second
-  UnitCurrent_in_cgs:  1   # Amperes
-  UnitTemp_in_cgs:     1   # Kelvin
-
-Cosmology:
-  Omega_m: 1.
-  Omega_lambda: 0.
-  Omega_b: 1.
-  h: 1.
-  a_begin: 0.00990099
-  a_end: 1.0
-
-# Parameters governing the time integration
-TimeIntegration:
-  dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
-  dt_max:     5e-3  # The maximal time-step size of the simulation (in internal units).
-
-# Parameters governing the snapshots
-Snapshots:
-  basename:	       box      # Common part of the name of output files
-  time_first:          0.       # Time of the first output (in internal units)
-  delta_time:          1.04     # Time difference between consecutive outputs (in internal units)
-  scale_factor_first:  0.00991
-  compression:         4
-
-# Parameters governing the conserved quantities statistics
-Statistics:
-  delta_time:          2. # Time between statistics output
-
-# Parameters for the hydrodynamics scheme
-SPH:
-  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation 
-  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-
-# Parameters related to the initial conditions
-InitialConditions:
-  file_name:  ./constantBox.hdf5       # The file to read
-
-Scheduler:
-  max_top_level_cells: 8
-  cell_split_size:     50
-  
-Gravity:
-  mesh_side_length:   32
-  eta: 0.025
-  theta: 0.3
-  r_cut_max: 5.
-  comoving_softening: 0.05
-  max_physical_softening: 0.05
diff --git a/examples/CoolingBox/coolingBox.yml b/examples/Cooling/CoolingBox/coolingBox.yml
similarity index 74%
rename from examples/CoolingBox/coolingBox.yml
rename to examples/Cooling/CoolingBox/coolingBox.yml
index 2bd2f19f6d78388ae638521f590255d410bc8697..853e480cd4ffba4baa659232e6d5f068b4ea2815 100644
--- a/examples/CoolingBox/coolingBox.yml
+++ b/examples/Cooling/CoolingBox/coolingBox.yml
@@ -27,17 +27,16 @@ Statistics:
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature: 100.       # Kelvin
   
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./coolingBox.hdf5     # The file to read
-
+  periodic:   1
+  
 # Dimensionless pre-factor for the time-step condition
 LambdaCooling:
-  lambda_cgs:                  1.0e-22    # Cooling rate (in cgs units)
-  minimum_temperature:         1.0e4      # Minimal temperature (Kelvin)
-  mean_molecular_weight:       0.59       # Mean molecular weight
-  hydrogen_mass_abundance:     0.75       # Hydrogen mass abundance (dimensionless)
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
   cooling_tstep_mult:          1.0        # Dimensionless pre-factor for the time-step condition
 
 # Cooling with Grackle 2.0
@@ -49,23 +48,28 @@ GrackleCooling:
   ProvideVolumetricHeatingRates: 0 # User provide volumetric heating rates
   ProvideSpecificHeatingRates: 0 # User provide specific heating rates
   SelfShieldingMethod: 0 # Grackle (<= 3) or Gear self shielding method
-  OutputMode: 1 # Write in output corresponding primordial chemistry mode
   MaxSteps: 1000
   ConvergenceLimit: 1e-2
   
-EAGLEChemistry:
-  InitMetallicity:         0.
-  InitAbundance_Hydrogen:  0.752
-  InitAbundance_Helium:    0.248
-  InitAbundance_Carbon:    0.000
-  InitAbundance_Nitrogen:  0.000
-  InitAbundance_Oxygen:    0.000
-  InitAbundance_Neon:      0.000
-  InitAbundance_Magnesium: 0.000
-  InitAbundance_Silicon:   0.000
-  InitAbundance_Iron:      0.000
-  CalciumOverSilicon:      0.0941736
-  SulphurOverSilicon:      0.6054160
-
 GearChemistry:
   InitialMetallicity: 0.01295
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5
+  He_reion_z_center:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+EAGLEChemistry:             # Solar abundances
+  init_abundance_metal:     0.014
+  init_abundance_Hydrogen:  0.70649785
+  init_abundance_Helium:    0.28055534
+  init_abundance_Carbon:    2.0665436e-3
+  init_abundance_Nitrogen:  8.3562563e-4
+  init_abundance_Oxygen:    5.4926244e-3
+  init_abundance_Neon:      1.4144605e-3
+  init_abundance_Magnesium: 5.907064e-4
+  init_abundance_Silicon:   6.825874e-4
+  init_abundance_Iron:      1.1032152e-3
+
diff --git a/examples/CoolingBox/getGlass.sh b/examples/Cooling/CoolingBox/getGlass.sh
similarity index 100%
rename from examples/CoolingBox/getGlass.sh
rename to examples/Cooling/CoolingBox/getGlass.sh
diff --git a/examples/Cooling/CoolingBox/makeIC.py b/examples/Cooling/CoolingBox/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..807ad5378d87891ed878a85c1020d5983474e13d
--- /dev/null
+++ b/examples/Cooling/CoolingBox/makeIC.py
@@ -0,0 +1,114 @@
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2016 Stefan Arridge (stefan.arridge@durham.ac.uk)
+#                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import h5py
+import numpy as np
+
+# Generates a SWIFT IC file with a constant density and pressure
+
+# Parameters
+periodic = 1  # 1 For periodic box
+boxSize = 1  # 1 kiloparsec
+rho = 3.2e3  # Density in code units (3.2e6 is 0.1 hydrogen atoms per cm^3)
+T = 4e3  # Initial Temperature
+gamma = 5./3.  # Gas adiabatic index
+fileName = "coolingBox.hdf5"
+# ---------------------------------------------------
+
+# defines some constants
+# need to be changed in plotTemperature.py too
+h_frac = 0.76
+mu = 4. / (1. + 3. * h_frac)
+
+m_h_cgs = 1.67e-24
+k_b_cgs = 1.38e-16
+
+# defines units
+unit_length = 3.0857e21  # kpc
+unit_mass = 2.0e33  # solar mass
+unit_time = 3.0857e16  # ~ Gyr
+
+# Read id, position and h from glass
+glass = h5py.File("glassCube_32.hdf5", "r")
+ids = glass["/PartType0/ParticleIDs"][:]
+pos = glass["/PartType0/Coordinates"][:, :] * boxSize
+h = glass["/PartType0/SmoothingLength"][:] * boxSize
+
+# Compute basic properties
+numPart = np.size(pos) // 3
+mass = boxSize**3 * rho / numPart
+internalEnergy = k_b_cgs * T * mu / ((gamma - 1.) * m_h_cgs)
+internalEnergy *= (unit_time / unit_length)**2
+
+# File
+f = h5py.File(fileName, 'w')
+
+# Header
+grp = f.create_group("/Header")
+grp.attrs["BoxSize"] = boxSize
+grp.attrs["NumPart_Total"] = [numPart, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = 0
+
+# Runtime parameters
+grp = f.create_group("/RuntimePars")
+grp.attrs["PeriodicBoundariesOn"] = periodic
+
+# Units
+grp = f.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = unit_length
+grp.attrs["Unit mass in cgs (U_M)"] = unit_mass
+grp.attrs["Unit time in cgs (U_t)"] = unit_time
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
+# Particle group
+grp = f.create_group("/PartType0")
+
+v = np.zeros((numPart, 3))
+ds = grp.create_dataset('Velocities', (numPart, 3), 'f')
+ds[()] = v
+
+m = np.full((numPart, 1), mass)
+ds = grp.create_dataset('Masses', (numPart, 1), 'f')
+ds[()] = m
+
+h = np.reshape(h, (numPart, 1))
+ds = grp.create_dataset('SmoothingLength', (numPart, 1), 'f')
+ds[()] = h
+
+u = np.full((numPart, 1), internalEnergy)
+ds = grp.create_dataset('InternalEnergy', (numPart, 1), 'f')
+ds[()] = u
+
+ids = np.reshape(ids, (numPart, 1))
+ds = grp.create_dataset('ParticleIDs', (numPart, 1), 'L')
+ds[()] = ids
+
+ds = grp.create_dataset('Coordinates', (numPart, 3), 'd')
+ds[()] = pos
+
+f.close()
+
+print("Initial condition generated")
diff --git a/examples/Cooling/CoolingBox/plotEnergy.py b/examples/Cooling/CoolingBox/plotEnergy.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c7af57d3d9dfdcfa222e9f77701f230d25f9ddc
--- /dev/null
+++ b/examples/Cooling/CoolingBox/plotEnergy.py
@@ -0,0 +1,110 @@
+from h5py import File
+import numpy as np
+import matplotlib
+from glob import glob
+matplotlib.use("Agg")
+import matplotlib.pyplot as plt
+
+# Plot parameters
+params = {
+    'axes.labelsize': 10,
+    'axes.titlesize': 10,
+    'font.size': 12,
+    'legend.fontsize': 12,
+    'xtick.labelsize': 10,
+    'ytick.labelsize': 10,
+    'text.usetex': True,
+    'figure.figsize': (5, 5),
+    'figure.subplot.left': 0.145,
+    'figure.subplot.right': 0.99,
+    'figure.subplot.bottom': 0.11,
+    'figure.subplot.top': 0.99,
+    'figure.subplot.wspace': 0.15,
+    'figure.subplot.hspace': 0.12,
+    'lines.markersize': 6,
+    'lines.linewidth': 3.,
+}
+plt.rcParams.update(params)
+
+
+# Some constants in cgs units
+k_b_cgs = 1.38e-16  # boltzmann
+m_h_cgs = 1.67e-24  # proton mass
+
+
+# File containing the total energy
+stats_filename = "./energy.txt"
+
+# First snapshot
+snap_filename = "coolingBox_0000.hdf5"
+
+# Read the initial state of the gas
+f = File(snap_filename, 'r')
+
+# Read the units parameters from the snapshot
+units = f["InternalCodeUnits"]
+unit_mass = units.attrs["Unit mass in cgs (U_M)"]
+unit_length = units.attrs["Unit length in cgs (U_L)"]
+unit_time = units.attrs["Unit time in cgs (U_t)"]
+
+# Read the adiabatic index
+gamma = float(f["HydroScheme"].attrs["Adiabatic index"])
+
+
+def energyUnits(u):
+    """ Compute the temperature from the internal energy. """
+    u *= (unit_length / unit_time)**2
+    return u * m_h_cgs / k_b_cgs
+
+
+# Read energy and time arrays
+array = np.genfromtxt(stats_filename, skip_header=1)
+time = array[:, 0] * unit_time
+total_mass = array[:, 1]
+total_energy = array[:, 2]
+kinetic_energy = array[:, 3]
+internal_energy = array[:, 4]
+radiated_energy = array[:, 8]
+initial_energy = total_energy[0]
+
+# Conversions to cgs
+total_energy_cgs = total_energy / total_mass[0]
+total_energy_cgs = energyUnits(total_energy_cgs)
+
+kinetic_energy_cgs = kinetic_energy / total_mass[0]
+kinetic_energy_cgs = energyUnits(kinetic_energy_cgs)
+
+internal_energy_cgs = internal_energy / total_mass[0]
+internal_energy_cgs = energyUnits(internal_energy_cgs)
+
+radiated_energy_cgs = radiated_energy / total_mass[0]
+radiated_energy_cgs = energyUnits(radiated_energy_cgs)
+
+# Read snapshots
+files = glob("coolingBox_*.hdf5")
+N = len(files)
+temp_snap = np.zeros(N)
+time_snap_cgs = np.zeros(N)
+for i in range(N):
+    snap = File(files[i], 'r')
+    u = snap["/PartType0/InternalEnergy"][:] * snap["/PartType0/Masses"][:]
+    u = sum(u) / total_mass[0]
+    temp_snap[i] = energyUnits(u)
+    time_snap_cgs[i] = snap["/Header"].attrs["Time"] * unit_time
+
+
+plt.figure()
+
+Myr_in_yr = 3.15e13
+plt.plot(time, total_energy_cgs, 'r-', lw=1.6, label="Gas total energy")
+plt.plot(time_snap_cgs, temp_snap, 'rD', ms=3)
+plt.plot(time, radiated_energy_cgs, 'g-', lw=1.6, label="Radiated energy")
+plt.plot(time, total_energy_cgs + radiated_energy_cgs, 'b-',
+         lw=0.6, label="Gas total + radiated")
+
+plt.legend(loc="right", fontsize=8, frameon=False,
+           handlelength=3, ncol=1)
+plt.xlabel("${\\rm{Time~[Myr]}}$", labelpad=0)
+plt.ylabel("${\\rm{Internal ~Energy ~(u ~m_H / k_B) ~[K]}}$")
+
+plt.savefig("energy.png", dpi=200)
diff --git a/examples/CoolingBox/run.sh b/examples/Cooling/CoolingBox/run.sh
similarity index 86%
rename from examples/CoolingBox/run.sh
rename to examples/Cooling/CoolingBox/run.sh
index 30b2177a6e8bb95a20146397f8b6a5021161b27f..ae5b6d361e3364028864882d3400e702c8e670fb 100755
--- a/examples/CoolingBox/run.sh
+++ b/examples/Cooling/CoolingBox/run.sh
@@ -21,7 +21,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -C -t 1 coolingBox.yml
+../../swift --hydro --cooling --threads=4 -n 1000 coolingBox.yml
 
 # Check energy conservation and cooling rate
-python energy_plot.py
+python plotEnergy.py
diff --git a/examples/CoolingHalo/README b/examples/Cooling/CoolingHalo/README
similarity index 100%
rename from examples/CoolingHalo/README
rename to examples/Cooling/CoolingHalo/README
diff --git a/examples/CoolingHalo/cooling_halo.yml b/examples/Cooling/CoolingHalo/cooling_halo.yml
similarity index 87%
rename from examples/CoolingHalo/cooling_halo.yml
rename to examples/Cooling/CoolingHalo/cooling_halo.yml
index 68c3478b717261698ac175835fc246e134e3a6a7..3d6e44ae3efdb4ad0687f61d904d87d55bb2837b 100644
--- a/examples/CoolingHalo/cooling_halo.yml
+++ b/examples/Cooling/CoolingHalo/cooling_halo.yml
@@ -27,11 +27,13 @@ Snapshots:
 SPH:
   resolution_eta:        1.2349   # Target smoothing length in units of the mean inter-particle separation (1.2349 == 48Ngbs with the cubic spline kernel).
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-
+  minimal_temperature:   1e4      # Kelvin
+  
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  CoolingHalo.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 IsothermalPotential:
   vrot:            200.     # rotation speed of isothermal potential in internal units
@@ -40,8 +42,5 @@ IsothermalPotential:
 
 # Cooling parameters
 LambdaCooling:
-  lambda_cgs:                  1.0e-22    # Cooling rate (in cgs units)
-  minimum_temperature:         1.0e4  # Minimal temperature (Kelvin)
-  mean_molecular_weight:       0.59   # Mean molecular weight
-  hydrogen_mass_abundance:     0.75   # Hydrogen mass abundance (dimensionless)
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
   cooling_tstep_mult:          1.0    # Dimensionless pre-factor for the time-step condition
diff --git a/examples/CoolingHalo/density_profile.py b/examples/Cooling/CoolingHalo/density_profile.py
similarity index 100%
rename from examples/CoolingHalo/density_profile.py
rename to examples/Cooling/CoolingHalo/density_profile.py
diff --git a/examples/CoolingHalo/internal_energy_profile.py b/examples/Cooling/CoolingHalo/internal_energy_profile.py
similarity index 100%
rename from examples/CoolingHalo/internal_energy_profile.py
rename to examples/Cooling/CoolingHalo/internal_energy_profile.py
diff --git a/examples/CoolingHalo/makeIC.py b/examples/Cooling/CoolingHalo/makeIC.py
similarity index 98%
rename from examples/CoolingHalo/makeIC.py
rename to examples/Cooling/CoolingHalo/makeIC.py
index 3ec1be6f7b5e568ebe8e0fefe508ef8287edb29c..046e5d619f047f8c6d40eab5a5cfce2e3a02074d 100644
--- a/examples/CoolingHalo/makeIC.py
+++ b/examples/Cooling/CoolingHalo/makeIC.py
@@ -91,10 +91,6 @@ grp.attrs["Unit current in cgs (U_I)"] = 1.
 grp.attrs["Unit temperature in cgs (U_T)"] = 1.
 
 
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 np.random.seed(1234)
 
diff --git a/examples/CoolingHaloWithSpin/makeIC_random_box.py b/examples/Cooling/CoolingHalo/makeIC_random_box.py
similarity index 97%
rename from examples/CoolingHaloWithSpin/makeIC_random_box.py
rename to examples/Cooling/CoolingHalo/makeIC_random_box.py
index 4295cb135233f2d5a59405b44e6d8e9c80a1f6c0..be8f2f172e5b7aef385f0974445e44068021c99d 100644
--- a/examples/CoolingHaloWithSpin/makeIC_random_box.py
+++ b/examples/Cooling/CoolingHalo/makeIC_random_box.py
@@ -102,10 +102,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 np.random.seed(1234)
 
diff --git a/examples/CoolingHalo/run.sh b/examples/Cooling/CoolingHalo/run.sh
similarity index 75%
rename from examples/CoolingHalo/run.sh
rename to examples/Cooling/CoolingHalo/run.sh
index 60ceae649d183dce3a7e5019a1ff94ce7bc4f08d..ca086fc93d5c4c34567c4405d7f3670972bbde9d 100755
--- a/examples/CoolingHalo/run.sh
+++ b/examples/Cooling/CoolingHalo/run.sh
@@ -4,7 +4,7 @@
 echo "Generating initial conditions for the isothermal potential box example..."
 python makeIC.py 10000 
 
-../swift -g -s -C -t 16 cooling_halo.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --cooling --threads=16 cooling_halo.yml 2>&1 | tee output.log
 
 python radial_profile.py 2. 200 100
 
diff --git a/examples/CoolingHalo/test_energy_conservation.py b/examples/Cooling/CoolingHalo/test_energy_conservation.py
similarity index 100%
rename from examples/CoolingHalo/test_energy_conservation.py
rename to examples/Cooling/CoolingHalo/test_energy_conservation.py
diff --git a/examples/CoolingHalo/velocity_profile.py b/examples/Cooling/CoolingHalo/velocity_profile.py
similarity index 100%
rename from examples/CoolingHalo/velocity_profile.py
rename to examples/Cooling/CoolingHalo/velocity_profile.py
diff --git a/examples/CoolingHaloWithSpin/README b/examples/Cooling/CoolingHaloWithSpin/README
similarity index 100%
rename from examples/CoolingHaloWithSpin/README
rename to examples/Cooling/CoolingHaloWithSpin/README
diff --git a/examples/CoolingHaloWithSpin/cooling_halo.yml b/examples/Cooling/CoolingHaloWithSpin/cooling_halo.yml
similarity index 87%
rename from examples/CoolingHaloWithSpin/cooling_halo.yml
rename to examples/Cooling/CoolingHaloWithSpin/cooling_halo.yml
index f6e9fe3b124631fc2d5336db8a7ffb18f7b34a95..1b29e1376e47ad32beacaf9bfb5408b8ff4d3191 100644
--- a/examples/CoolingHaloWithSpin/cooling_halo.yml
+++ b/examples/Cooling/CoolingHaloWithSpin/cooling_halo.yml
@@ -27,11 +27,13 @@ Snapshots:
 SPH:
   resolution_eta:        1.2349   # Target smoothing length in units of the mean inter-particle separation (1.2349 == 48Ngbs with the cubic spline kernel).
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   1e4      # Kelvin
 
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  CoolingHalo.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 IsothermalPotential:
   vrot:            200.   # Rotation speed of isothermal potential in internal units
@@ -40,8 +42,5 @@ IsothermalPotential:
 
 # Cooling parameters
 LambdaCooling:
-  lambda_cgs:                  1.0e-22   # Cooling rate (in cgs units)
-  minimum_temperature:         1.0e4  # Minimal temperature (Kelvin)
-  mean_molecular_weight:       0.59   # Mean molecular weight
-  hydrogen_mass_abundance:     0.75   # Hydrogen mass abundance (dimensionless)
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
   cooling_tstep_mult:          0.1    # Dimensionless pre-factor for the time-step condition
diff --git a/examples/CoolingHaloWithSpin/density_profile.py b/examples/Cooling/CoolingHaloWithSpin/density_profile.py
similarity index 100%
rename from examples/CoolingHaloWithSpin/density_profile.py
rename to examples/Cooling/CoolingHaloWithSpin/density_profile.py
diff --git a/examples/CoolingHaloWithSpin/internal_energy_profile.py b/examples/Cooling/CoolingHaloWithSpin/internal_energy_profile.py
similarity index 100%
rename from examples/CoolingHaloWithSpin/internal_energy_profile.py
rename to examples/Cooling/CoolingHaloWithSpin/internal_energy_profile.py
diff --git a/examples/CoolingHaloWithSpin/makeIC.py b/examples/Cooling/CoolingHaloWithSpin/makeIC.py
similarity index 98%
rename from examples/CoolingHaloWithSpin/makeIC.py
rename to examples/Cooling/CoolingHaloWithSpin/makeIC.py
index 2cf3127c743f61756b3ff6c4a7738c83d185f9cd..9a839bfd01594fd1d1c899d43223d0ebce12a72f 100644
--- a/examples/CoolingHaloWithSpin/makeIC.py
+++ b/examples/Cooling/CoolingHaloWithSpin/makeIC.py
@@ -92,11 +92,6 @@ grp.attrs["Unit time in cgs (U_t)"] = const_unit_length_in_cgs / const_unit_velo
 grp.attrs["Unit current in cgs (U_I)"] = 1.
 grp.attrs["Unit temperature in cgs (U_T)"] = 1.
 
-
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 np.random.seed(1234)
 
diff --git a/examples/CoolingHalo/makeIC_random_box.py b/examples/Cooling/CoolingHaloWithSpin/makeIC_random_box.py
similarity index 97%
rename from examples/CoolingHalo/makeIC_random_box.py
rename to examples/Cooling/CoolingHaloWithSpin/makeIC_random_box.py
index 4295cb135233f2d5a59405b44e6d8e9c80a1f6c0..be8f2f172e5b7aef385f0974445e44068021c99d 100644
--- a/examples/CoolingHalo/makeIC_random_box.py
+++ b/examples/Cooling/CoolingHaloWithSpin/makeIC_random_box.py
@@ -102,10 +102,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 np.random.seed(1234)
 
diff --git a/examples/CoolingHaloWithSpin/run.sh b/examples/Cooling/CoolingHaloWithSpin/run.sh
similarity index 77%
rename from examples/CoolingHaloWithSpin/run.sh
rename to examples/Cooling/CoolingHaloWithSpin/run.sh
index 131fbf3cb10d2014546683b5f43194840544fd55..17ec5251a1071e36413ba926d14a179c1d6ed36b 100755
--- a/examples/CoolingHaloWithSpin/run.sh
+++ b/examples/Cooling/CoolingHaloWithSpin/run.sh
@@ -5,7 +5,7 @@ echo "Generating initial conditions for the isothermal potential box example..."
 python makeIC.py 10000 
 
 # Run SWIFT with external potential, SPH and cooling
-../swift -g -s -C -t 1 cooling_halo.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --cooling --threads=1 cooling_halo.yml 2>&1 | tee output.log
 
 # python radial_profile.py 10
 
diff --git a/examples/CoolingHaloWithSpin/test_energy_conservation.py b/examples/Cooling/CoolingHaloWithSpin/test_energy_conservation.py
similarity index 100%
rename from examples/CoolingHaloWithSpin/test_energy_conservation.py
rename to examples/Cooling/CoolingHaloWithSpin/test_energy_conservation.py
diff --git a/examples/CoolingHaloWithSpin/velocity_profile.py b/examples/Cooling/CoolingHaloWithSpin/velocity_profile.py
similarity index 100%
rename from examples/CoolingHaloWithSpin/velocity_profile.py
rename to examples/Cooling/CoolingHaloWithSpin/velocity_profile.py
diff --git a/examples/Cooling/CoolingRates/Makefile.am b/examples/Cooling/CoolingRates/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..8bb0afa44436c5059f93b585ab6f8893752ce294
--- /dev/null
+++ b/examples/Cooling/CoolingRates/Makefile.am
@@ -0,0 +1,32 @@
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Add the source directory and the non-standard paths to the included library headers to CFLAGS
+AM_CFLAGS = -I$(top_srcdir)/src -I$(top_builddir)/examples $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS)
+
+AM_LDFLAGS = $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
+
+# Extra libraries.
+EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(VELOCIRAPTOR_LIBS) $(GSL_LIBS)
+
+# Programs.
+bin_PROGRAMS = cooling_rates
+
+# Sources
+cooling_rates_SOURCES = cooling_rates.c
+cooling_rates_CFLAGS = $(AM_CFLAGS)
+cooling_rates_LDADD =  ../../../src/.libs/libswiftsim.a $(EXTRA_LIBS)
+
diff --git a/examples/Cooling/CoolingRates/README b/examples/Cooling/CoolingRates/README
new file mode 100644
index 0000000000000000000000000000000000000000..7ac84c0e0aa64dc75d54a520c9997750b4977a39
--- /dev/null
+++ b/examples/Cooling/CoolingRates/README
@@ -0,0 +1,19 @@
+This is a test that produces a plot of the contribution to the cooling
+rate from each of the elements depending on internal energy, density
+and redshift based on the EAGLE tables. To do so, the function in
+src/cooling/EAGLE returning the cooling rate is run for multiple
+values of the internal energy. The resulting cooling rates are written
+to files and plotted with a python script (plot_cooling_rates.py).
+
+The test may be run by:
+./getCoolingTable.sh
+./cooling_rates -z X -d Y
+python plot_cooling_rates.py
+
+where X is the redshift at which the cooling rates are evaluated and Y
+is the base 10 logarithm of the hydrogen number density expressed in
+cgs (i.e. cm^-3). Different metallicities may be specified in
+cooling_rates.yml
+
+Running with -z 3 -d -4 should reproduce the Fig.4 of Wiersma+09 with
+the added Compton cooling contribution.
diff --git a/examples/Cooling/CoolingRates/cooling_rates.c b/examples/Cooling/CoolingRates/cooling_rates.c
new file mode 100644
index 0000000000000000000000000000000000000000..e7ff0340853e5cd5361286bd8af5c547681b4f63
--- /dev/null
+++ b/examples/Cooling/CoolingRates/cooling_rates.c
@@ -0,0 +1,314 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2015 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#include "config.h"
+
+/* Some standard headers. */
+#include <fenv.h>
+#include <unistd.h>
+
+/* Local headers. */
+#include "swift.h"
+
+#if defined(COOLING_EAGLE) && defined(CHEMISTRY_EAGLE) && defined(GADGET2_SPH)
+#include "cooling/EAGLE/cooling_rates.h"
+#include "cooling/EAGLE/cooling_tables.h"
+
+/* Flag used for printing cooling rate contribution from each
+ * element. For testing only. Incremented by 1/(number of elements)
+ * until reaches 1 after which point append to files instead of
+ * writing new file. */
+static float print_cooling_rate_contribution_flag = 0;
+
+/**
+ * @brief Wrapper function used to calculate cooling rate and dLambda_du.
+ * Writes to file contribution from each element to cooling rate for testing
+ * purposes (this function is not used when running SWIFT). Table indices
+ * and offsets for redshift, hydrogen number density and helium fraction are
+ * passed in so as to compute them only once per particle.
+ *
+ * @param n_h_i Particle hydrogen number density index
+ * @param d_n_h Particle hydrogen number density offset
+ * @param He_i Particle helium fraction index
+ * @param d_He Particle helium fraction offset
+ * @param p Particle structure
+ * @param cooling #cooling_function_data structure
+ * @param cosmo #cosmology structure
+ * @param phys_const #phys_const structure
+ * @param abundance_ratio Ratio of element abundance to solar
+ */
+INLINE static double eagle_print_metal_cooling_rate(
+    int n_h_i, float d_n_h, int He_i, float d_He, const struct part *restrict p,
+    const struct xpart *restrict xp,
+    const struct cooling_function_data *restrict cooling,
+    const struct cosmology *restrict cosmo, const struct phys_const *phys_const,
+    float *abundance_ratio) {
+
+  /* array to store contributions to cooling rates from each of the
+   * elements */
+  double *element_lambda;
+  element_lambda = malloc((eagle_cooling_N_metal + 2) * sizeof(double));
+
+  /* Get the H and He mass fractions */
+  const float XH = p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+
+  /* convert Hydrogen mass fraction in Hydrogen number density */
+  const double n_h = hydro_get_physical_density(p, cosmo) * XH /
+                     phys_const->const_proton_mass *
+                     cooling->number_density_to_cgs;
+
+  /* cooling rate, derivative of cooling rate and internal energy */
+  double lambda_net = 0.0;
+  double u = hydro_get_physical_internal_energy(p, xp, cosmo) *
+             cooling->internal_energy_to_cgs;
+
+  /* Open files for writing contributions to cooling rate. Each element
+   * gets its own file.  */
+  char output_filename[32];
+  FILE **output_file = malloc((eagle_cooling_N_metal + 2) * sizeof(FILE *));
+
+  /* Once this flag reaches 1 we stop overwriting and start appending.  */
+  print_cooling_rate_contribution_flag += 1.0 / (eagle_cooling_N_metal + 2);
+
+  /* Loop over each element */
+  for (int element = 0; element < eagle_cooling_N_metal + 2; element++) {
+    sprintf(output_filename, "%s%d%s", "cooling_element_", element, ".dat");
+    if (print_cooling_rate_contribution_flag < 1) {
+      /* If this is the first time we're running this function, overwrite the
+       * output files */
+      output_file[element] = fopen(output_filename, "w");
+      print_cooling_rate_contribution_flag += 1.0 / (eagle_cooling_N_metal + 2);
+    } else {
+      /* append to existing files */
+      output_file[element] = fopen(output_filename, "a");
+    }
+    if (output_file == NULL) {
+      error("Error opening file!\n");
+    }
+  }
+
+  /* calculate cooling rates */
+  for (int j = 0; j < eagle_cooling_N_metal + 2; j++) element_lambda[j] = 0.0;
+  lambda_net = eagle_metal_cooling_rate(
+      log10(u), cosmo->z, n_h, abundance_ratio, n_h_i, d_n_h, He_i, d_He,
+      cooling, /*dLambdaNet_du=*/NULL, element_lambda);
+
+  /* write cooling rate contributions to their own files. */
+  for (int j = 0; j < eagle_cooling_N_metal + 2; j++) {
+    fprintf(output_file[j], "%.5e\n", element_lambda[j]);
+  }
+
+  for (int i = 0; i < eagle_cooling_N_metal + 2; i++) fclose(output_file[i]);
+  free(output_file);
+  free(element_lambda);
+
+  return lambda_net;
+}
+
+/**
+ * @brief Assign particle density and entropy corresponding to the
+ * hydrogen number density and internal energy specified.
+ *
+ * @param p Particle data structure
+ * @param cooling Cooling function data structure
+ * @param cosmo Cosmology data structure
+ * @param internal_const Physical constants data structure
+ * @param nh Hydrogen number density (cgs units)
+ * @param u Internal energy (cgs units)
+ */
+void set_quantities(struct part *restrict p, struct xpart *restrict xp,
+                    const struct unit_system *restrict us,
+                    const struct cooling_function_data *restrict cooling,
+                    const struct cosmology *restrict cosmo,
+                    const struct phys_const *restrict internal_const, float nh,
+                    double u) {
+
+  double hydrogen_number_density =
+      nh * pow(units_cgs_conversion_factor(us, UNIT_CONV_LENGTH), 3);
+  p->rho = hydrogen_number_density * internal_const->const_proton_mass /
+           p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+
+  float pressure = (u * cosmo->a * cosmo->a) *
+                   cooling->internal_energy_from_cgs * p->rho *
+                   (hydro_gamma_minus_one);
+  p->entropy = pressure * (pow(p->rho, -hydro_gamma));
+  xp->entropy_full = p->entropy;
+}
+
+/**
+ * @brief Produces contributions to cooling rates for different
+ * hydrogen number densities, from different metals,
+ * tests 1d and 4d table interpolations produce
+ * same results for cooling rate, dlambda/du and temperature.
+ */
+int main(int argc, char **argv) {
+  // Declare relevant structs
+  struct swift_params *params = malloc(sizeof(struct swift_params));
+  struct unit_system us;
+  struct chemistry_global_data chem_data;
+  struct part p;
+  struct xpart xp;
+  struct phys_const internal_const;
+  struct cooling_function_data cooling;
+  struct cosmology cosmo;
+  const char *parametersFileName = "./cooling_rates.yml";
+
+  /* Initialize CPU frequency, this also starts time. */
+  unsigned long long cpufreq = 0;
+  clocks_set_cpufreq(cpufreq);
+
+/* Choke on FP-exceptions */
+#ifdef HAVE_FE_ENABLE_EXCEPT
+  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+#endif
+
+  const int npts = 250;  // number of values for the internal energy at which
+                         // cooling rate is evaluated
+
+  // Set some default values
+  float redshift = 0.0, log_10_nh = -1;
+
+  // Read options
+  int param;
+  while ((param = getopt(argc, argv, "z:d:")) != -1) switch (param) {
+      case 'z':
+        // read redshift
+        redshift = atof(optarg);
+        break;
+      case 'd':
+        // read log10 of hydrogen number density
+        log_10_nh = atof(optarg);
+        break;
+      case '?':
+        if (optopt == 'z')
+          printf("Option -%c requires an argument.\n", optopt);
+        else
+          printf("Unknown option character `\\x%x'.\n", optopt);
+        error("invalid option(s) to cooling_rates");
+    }
+
+  // Read the parameter file
+  if (params == NULL) error("Error allocating memory for the parameter file.");
+  message("Reading runtime parameters from file '%s'", parametersFileName);
+  parser_read_file(parametersFileName, params);
+
+  // Init units
+  units_init_from_params(&us, params, "InternalUnitSystem");
+  phys_const_init(&us, params, &internal_const);
+
+  // Init chemistry
+  chemistry_init(params, &us, &internal_const, &chem_data);
+  chemistry_first_init_part(&internal_const, &us, &cosmo, &chem_data, &p, &xp);
+  chemistry_print(&chem_data);
+
+  // Init cosmology
+  cosmology_init(params, &us, &internal_const, &cosmo);
+
+  // Set redshift and associated quantities
+  const float scale_factor = 1.0 / (1.0 + redshift);
+  integertime_t ti_current =
+      log(scale_factor / cosmo.a_begin) / cosmo.time_base;
+  cosmology_update(&cosmo, &internal_const, ti_current);
+  message("Redshift is %f", cosmo.z);
+
+  // Init cooling
+  cooling_init(params, &us, &internal_const, &cooling);
+  cooling_print(&cooling);
+  cooling_update(&cosmo, &cooling);
+
+  // Calculate abundance ratios
+  float abundance_ratio[(chemistry_element_count + 2)];
+  abundance_ratio_to_solar(&p, &cooling, abundance_ratio);
+
+  // extract mass fractions, calculate table indices and offsets
+  float XH = p.chemistry_data.metal_mass_fraction[chemistry_element_H];
+  float HeFrac =
+      p.chemistry_data.metal_mass_fraction[chemistry_element_He] /
+      (XH + p.chemistry_data.metal_mass_fraction[chemistry_element_He]);
+  int He_i, n_h_i;
+  float d_He, d_n_h;
+  get_index_1d(cooling.HeFrac, eagle_cooling_N_He_frac, HeFrac, &He_i, &d_He);
+
+  // Calculate contributions from metals to cooling rate
+  // open file
+  FILE *output_file = fopen("cooling_output.dat", "w");
+  if (output_file == NULL) {
+    error("Error opening output file!\n");
+  }
+
+  // set hydrogen number density
+  const float nh = exp(M_LN10 * log_10_nh);
+
+  /* Initial internal energy */
+  double u = 1.0e14;
+
+  // set internal energy to dummy value, will get reset when looping over
+  // internal energies
+  set_quantities(&p, &xp, &us, &cooling, &cosmo, &internal_const, nh, u);
+  float inn_h = hydro_get_physical_density(&p, &cosmo) * XH /
+                internal_const.const_proton_mass *
+                cooling.number_density_to_cgs;
+  get_index_1d(cooling.nH, eagle_cooling_N_density, log10(inn_h), &n_h_i,
+               &d_n_h);
+
+  // Loop over internal energy
+  for (int j = 0; j < npts; j++) {
+
+    // Update the particle with the new values
+    set_quantities(&p, &xp, &us, &cooling, &cosmo, &internal_const, nh,
+                   pow(10.0, 10.0 + j * 8.0 / npts));
+
+    // New internal energy
+    u = hydro_get_physical_internal_energy(&p, &xp, &cosmo) *
+        cooling.internal_energy_to_cgs;
+
+    // calculate cooling rates
+    const double temperature = eagle_convert_u_to_temp(
+        log10(u), cosmo.z, 0, NULL, n_h_i, He_i, d_n_h, d_He, &cooling);
+
+    const double cooling_du_dt = eagle_print_metal_cooling_rate(
+        n_h_i, d_n_h, He_i, d_He, &p, &xp, &cooling, &cosmo, &internal_const,
+        abundance_ratio);
+
+    // Dump...
+    fprintf(output_file, "%.5e %.5e\n", exp(M_LN10 * temperature),
+            cooling_du_dt);
+  }
+  fclose(output_file);
+  message("done cooling rates test");
+
+  /* Clean everything */
+  cosmology_clean(&cosmo);
+  cooling_clean(&cooling);
+
+  free(params);
+  return 0;
+}
+
+#else
+
+int main(int argc, char **argv) {
+
+  /* Initialize CPU frequency, this also starts time. */
+  unsigned long long cpufreq = 0;
+  clocks_set_cpufreq(cpufreq);
+
+  message("This test is only defined for the EAGLE cooling model.");
+  return 0;
+}
+#endif
diff --git a/examples/Cooling/CoolingRates/cooling_rates.yml b/examples/Cooling/CoolingRates/cooling_rates.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1d67ad0af79368c39c24879b101a540d0bccb3d1
--- /dev/null
+++ b/examples/Cooling/CoolingRates/cooling_rates.yml
@@ -0,0 +1,36 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.989e43      # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.085678e24   # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.04          # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+
+EAGLEChemistry:
+  init_abundance_metal:     0.014
+  init_abundance_Hydrogen:  0.70649785
+  init_abundance_Helium:    0.28055534
+  init_abundance_Carbon:    2.0665436e-3
+  init_abundance_Nitrogen:  8.3562563e-4
+  init_abundance_Oxygen:    5.4926244e-3
+  init_abundance_Neon:      1.4144605e-3
+  init_abundance_Magnesium: 5.907064e-4
+  init_abundance_Silicon:   6.825874e-4
+  init_abundance_Iron:      1.1032152e-3
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
diff --git a/examples/Cooling/CoolingRates/getCoolingTable.sh b/examples/Cooling/CoolingRates/getCoolingTable.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5cfd93ef0f4603e40b7675f3f2c254b2250f699f
--- /dev/null
+++ b/examples/Cooling/CoolingRates/getCoolingTable.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/EAGLE/coolingtables.tar.gz
+tar -xf coolingtables.tar.gz 
diff --git a/examples/Cooling/CoolingRates/plot_cooling_rates.py b/examples/Cooling/CoolingRates/plot_cooling_rates.py
new file mode 100644
index 0000000000000000000000000000000000000000..cca12468befd8c407d769ee00fcb03ecd52db3ec
--- /dev/null
+++ b/examples/Cooling/CoolingRates/plot_cooling_rates.py
@@ -0,0 +1,53 @@
+# Plots contribution to cooling rates from each of the different metals
+# based on cooling_output.dat and cooling_element_*.dat files produced
+# by testCooling.
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+# Number of metals tracked by EAGLE cooling
+elements = 11
+
+# Declare arrays of internal energy and cooling rate
+u = []
+cooling_rate = [[] for i in range(elements + 1)]
+Temperature = [[] for i in range(elements + 1)]
+
+# Read in total cooling rate
+file_in = open("cooling_output.dat", "r")
+for line in file_in:
+    data = line.split()
+    u.append(float(data[0]))
+    cooling_rate[0].append(-float(data[1]))
+file_in.close()
+
+# Read in contributions to cooling rates from each of the elements
+for elem in range(elements):
+    file_in = open("cooling_element_" + str(elem) + ".dat", "r")
+    for line in file_in:
+        data = line.split()
+        cooling_rate[elem + 1].append(-float(data[0]))
+    file_in.close()
+
+# Plot
+ax = plt.subplot(111)
+p0, = plt.loglog(u, cooling_rate[0], linewidth=0.5, color="k", label="Total")
+p1, = plt.loglog(
+    u, cooling_rate[1], linewidth=0.5, color="k", linestyle="--", label="H + He"
+)
+p2, = plt.loglog(u, cooling_rate[3], linewidth=0.5, color="b", label="C")
+p3, = plt.loglog(u, cooling_rate[4], linewidth=0.5, color="g", label="N")
+p4, = plt.loglog(u, cooling_rate[5], linewidth=0.5, color="r", label="O")
+p5, = plt.loglog(u, cooling_rate[6], linewidth=0.5, color="c", label="Ne")
+p6, = plt.loglog(u, cooling_rate[7], linewidth=0.5, color="m", label="Mg")
+p7, = plt.loglog(u, cooling_rate[8], linewidth=0.5, color="y", label="Si")
+p8, = plt.loglog(u, cooling_rate[9], linewidth=0.5, color="lightgray", label="S")
+p9, = plt.loglog(u, cooling_rate[10], linewidth=0.5, color="olive", label="Ca")
+p10, = plt.loglog(u, cooling_rate[11], linewidth=0.5, color="saddlebrown", label="Fe")
+ax.set_position([0.15, 0.15, 0.75, 0.75])
+plt.xlim([1e3, 1e8])
+plt.ylim([1e-24, 1e-21])
+plt.xlabel("Temperature ${\\rm{[K]}}$", fontsize=14)
+plt.ylabel("${\Lambda/n_H^2 }$ ${\\rm{[erg \cdot cm^3 \cdot s^{-1}]}}$", fontsize=14)
+plt.legend(handles=[p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10])
+plt.savefig("cooling_rates", dpi=200)
diff --git a/examples/getCoolingTable.sh b/examples/Cooling/getCoolingTable.sh
old mode 100644
new mode 100755
similarity index 100%
rename from examples/getCoolingTable.sh
rename to examples/Cooling/getCoolingTable.sh
diff --git a/examples/CoolingBox/energy_plot.py b/examples/CoolingBox/energy_plot.py
deleted file mode 100644
index 45f0b4f6b11c3855a919f6a98fd0ca006a887f82..0000000000000000000000000000000000000000
--- a/examples/CoolingBox/energy_plot.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import matplotlib
-matplotlib.use("Agg")
-from pylab import *
-import h5py
-
-# Plot parameters
-params = {'axes.labelsize': 10,
-'axes.titlesize': 10,
-'font.size': 12,
-'legend.fontsize': 12,
-'xtick.labelsize': 10,
-'ytick.labelsize': 10,
-'text.usetex': True,
- 'figure.figsize' : (3.15,3.15),
-'figure.subplot.left'    : 0.145,
-'figure.subplot.right'   : 0.99,
-'figure.subplot.bottom'  : 0.11,
-'figure.subplot.top'     : 0.99,
-'figure.subplot.wspace'  : 0.15,
-'figure.subplot.hspace'  : 0.12,
-'lines.markersize' : 6,
-'lines.linewidth' : 3.,
-'text.latex.unicode': True
-}
-rcParams.update(params)
-rc('font',**{'family':'sans-serif','sans-serif':['Times']})
-
-
-import numpy as np
-import h5py as h5
-import sys
-
-# File containing the total energy
-stats_filename = "./energy.txt"
-
-# First snapshot
-snap_filename = "coolingBox_0000.hdf5"
-
-# Some constants in cgs units
-k_b = 1.38E-16 #boltzmann
-m_p = 1.67e-24 #proton mass
-
-# Initial conditions set in makeIC.py
-T_init = 1.0e5
-
-# Read the initial state of the gas
-f = h5.File(snap_filename,'r')
-rho = np.mean(f["/PartType0/Density"])
-pressure = np.mean(f["/PartType0/Pressure"])
-
-# Read the units parameters from the snapshot
-units = f["InternalCodeUnits"]
-unit_mass = units.attrs["Unit mass in cgs (U_M)"]
-unit_length = units.attrs["Unit length in cgs (U_L)"]
-unit_time = units.attrs["Unit time in cgs (U_t)"]
-
-# Read the properties of the cooling function
-parameters = f["Parameters"]
-cooling_lambda = float(parameters.attrs["LambdaCooling:lambda_cgs"])
-min_T = float(parameters.attrs["LambdaCooling:minimum_temperature"])
-mu = float(parameters.attrs["LambdaCooling:mean_molecular_weight"])
-X_H = float(parameters.attrs["LambdaCooling:hydrogen_mass_abundance"])
-
-# Read the adiabatic index
-gamma = float(f["HydroScheme"].attrs["Adiabatic index"])
-
-print "Initial density :", rho
-print "Initial pressure:", pressure
-print "Adiabatic index :", gamma
-
-# Read energy and time arrays
-array = np.genfromtxt(stats_filename,skip_header = 1)
-time = array[:,0]
-total_mass = array[:,1]
-total_energy = array[:,2]
-kinetic_energy = array[:,3]
-internal_energy = array[:,4]
-radiated_energy = array[:,8]
-initial_energy = total_energy[0]
-
-# Conversions to cgs
-rho_cgs = rho * unit_mass / (unit_length)**3
-time_cgs = time * unit_time
-total_energy_cgs = total_energy / total_mass[0] * unit_length**2 / (unit_time)**2
-kinetic_energy_cgs = kinetic_energy / total_mass[0] * unit_length**2 / (unit_time)**2
-internal_energy_cgs = internal_energy / total_mass[0] * unit_length**2 / (unit_time)**2
-radiated_energy_cgs = radiated_energy / total_mass[0] * unit_length**2 / (unit_time)**2  
-
-# Find the energy floor
-u_floor_cgs = k_b * min_T / (mu * m_p * (gamma - 1.))
-
-# Find analytic solution
-initial_energy_cgs = initial_energy/total_mass[0] * unit_length**2 / (unit_time)**2 
-n_H_cgs = X_H * rho_cgs / m_p
-du_dt_cgs = -cooling_lambda * n_H_cgs**2 / rho_cgs
-cooling_time_cgs = (initial_energy_cgs/(-du_dt_cgs))[0]
-analytic_time_cgs = np.linspace(0, cooling_time_cgs * 1.8, 1000)
-u_analytic_cgs = du_dt_cgs*analytic_time_cgs + initial_energy_cgs
-u_analytic_cgs[u_analytic_cgs < u_floor_cgs] = u_floor_cgs
-
-print "Cooling time:", cooling_time_cgs, "[s]"
-
-# Read snapshots
-u_snapshots_cgs = zeros(25)
-t_snapshots_cgs = zeros(25)
-for i in range(25):
-    snap = h5.File("coolingBox_%0.4d.hdf5"%i,'r')
-    u_snapshots_cgs[i] = sum(snap["/PartType0/InternalEnergy"][:] * snap["/PartType0/Masses"][:])  / total_mass[0] * unit_length**2 / (unit_time)**2
-    t_snapshots_cgs[i] = snap["/Header"].attrs["Time"] * unit_time
-
-
-figure()
-plot(time_cgs, total_energy_cgs, 'r-', lw=1.6, label="Gas total energy")
-plot(t_snapshots_cgs, u_snapshots_cgs, 'rD', ms=3)
-plot(time_cgs, radiated_energy_cgs, 'g-', lw=1.6, label="Radiated energy")
-plot(time_cgs, total_energy_cgs + radiated_energy_cgs, 'b-', lw=0.6, label="Gas total + radiated")
-
-plot(analytic_time_cgs, u_analytic_cgs, '--', color='k', alpha=0.8, lw=1.0, label="Analytic solution")
-
-legend(loc="upper right", fontsize=8, frameon=False, handlelength=3, ncol=1)
-xlabel("${\\rm{Time~[s]}}$", labelpad=0)
-ylabel("${\\rm{Energy~[erg]}}$")
-xlim(0, 1.5*cooling_time_cgs)
-ylim(0, 1.5*u_analytic_cgs[0])
-
-savefig("energy.png", dpi=200)
-
-
diff --git a/examples/CoolingBox/makeIC.py b/examples/CoolingBox/makeIC.py
deleted file mode 100644
index f863e174b1fcd404ae178fe324c7a165598b4af0..0000000000000000000000000000000000000000
--- a/examples/CoolingBox/makeIC.py
+++ /dev/null
@@ -1,103 +0,0 @@
-###############################################################################
- # This file is part of SWIFT.
- # Copyright (c) 2016 Stefan Arridge (stefan.arridge@durhama.ac.uk)
- #                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- # 
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU Lesser General Public License as published
- # by the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
- # 
- # This program is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- # GNU General Public License for more details.
- # 
- # You should have received a copy of the GNU Lesser General Public License
- # along with this program.  If not, see <http://www.gnu.org/licenses/>.
- # 
- ##############################################################################
-
-import h5py
-import sys
-from numpy import *
-
-# Generates a SWIFT IC file with a constant density and pressure
-
-# Parameters
-periodic= 1           # 1 For periodic box
-boxSize = 1           # 1 kiloparsec    
-rho = 3.2e3           # Density in code units (3.2e6 is 0.1 hydrogen atoms per cm^3)
-P = 4.5e6             # Pressure in code units (at 10^5K)
-gamma = 5./3.         # Gas adiabatic index
-eta = 1.2349          # 48 ngbs with cubic spline kernel
-fileName = "coolingBox.hdf5" 
-
-#---------------------------------------------------
-
-# Read id, position and h from glass
-glass = h5py.File("glassCube_32.hdf5", "r")
-ids = glass["/PartType0/ParticleIDs"][:]
-pos = glass["/PartType0/Coordinates"][:,:] * boxSize
-h = glass["/PartType0/SmoothingLength"][:] * boxSize
-
-# Compute basic properties
-numPart = size(pos) / 3
-mass = boxSize**3 * rho / numPart
-internalEnergy = P / ((gamma - 1.) * rho)
-
-#File
-file = h5py.File(fileName, 'w')
-
-# Header
-grp = file.create_group("/Header")
-grp.attrs["BoxSize"] = boxSize
-grp.attrs["NumPart_Total"] =  [numPart, 0, 0, 0, 0, 0]
-grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
-grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
-grp.attrs["Time"] = 0.0
-grp.attrs["NumFilesPerSnapshot"] = 1
-grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
-grp.attrs["Flag_Entropy_ICs"] = 0
-
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
-# Units
-grp = file.create_group("/Units")
-grp.attrs["Unit length in cgs (U_L)"] = 3.0857e21 
-grp.attrs["Unit mass in cgs (U_M)"] = 2.0e33 
-grp.attrs["Unit time in cgs (U_t)"] = 3.0857e16 
-grp.attrs["Unit current in cgs (U_I)"] = 1.
-grp.attrs["Unit temperature in cgs (U_T)"] = 1.
-
-#Particle group
-grp = file.create_group("/PartType0")
-
-v  = zeros((numPart, 3))
-ds = grp.create_dataset('Velocities', (numPart, 3), 'f')
-ds[()] = v
-
-m = full((numPart, 1), mass)
-ds = grp.create_dataset('Masses', (numPart,1), 'f')
-ds[()] = m
-
-h = reshape(h, (numPart, 1))
-ds = grp.create_dataset('SmoothingLength', (numPart, 1), 'f')
-ds[()] = h
-
-u = full((numPart, 1), internalEnergy)
-ds = grp.create_dataset('InternalEnergy', (numPart,1), 'f')
-ds[()] = u
-
-ids = reshape(ids, (numPart, 1))
-ds = grp.create_dataset('ParticleIDs', (numPart, 1), 'L')
-ds[()] = ids
-
-ds = grp.create_dataset('Coordinates', (numPart, 3), 'd')
-ds[()] = pos
-
-file.close()
-
-print numPart
diff --git a/examples/Cosmology/ComovingSodShock_1D/makeIC.py b/examples/Cosmology/ComovingSodShock_1D/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..371acc685cd00bd211e024ecc17c976ea5a08f68
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_1D/makeIC.py
@@ -0,0 +1,123 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+import h5py
+from numpy import *
+
+# Generates a swift IC file for the 1D Sod Shock in a periodic box
+
+unit_l_in_cgs = 3.086e18
+unit_m_in_cgs = 2.94e55
+unit_t_in_cgs = 3.086e18
+
+# Parameters
+gamma = 5./3.          # Gas adiabatic index
+numPart_L = 800        # Number of particles in the left state
+x_min = -1.
+x_max = 1.
+rho_L = 1.             # Density left state
+rho_R = 0.125          # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+a_beg = 0.001
+fileName = "sodShock.hdf5" 
+
+
+#---------------------------------------------------
+
+# Find how many particles we actually have
+boxSize = x_max - x_min
+numPart_R = int(numPart_L * (rho_R / rho_L))
+numPart = numPart_L + numPart_R
+
+# Now get the distances
+delta_L = (boxSize/2)  / numPart_L
+delta_R = (boxSize/2)  / numPart_R
+offset_L = delta_L / 2
+offset_R = delta_R / 2
+
+# Build the arrays
+coords = zeros((numPart, 3))
+v = zeros((numPart, 3))
+ids = linspace(1, numPart, numPart)
+m = zeros(numPart)
+h = zeros(numPart)
+u = zeros(numPart)
+
+# Set the particles on the left
+for i in range(numPart_L):
+    coords[i,0] = x_min + offset_L + i * delta_L
+    u[i] = P_L / (rho_L * (gamma - 1.))
+    h[i] = 1.2348 * delta_L
+    m[i] = boxSize * rho_L / (2. * numPart_L)
+    v[i,0] = v_L
+    
+# Set the particles on the right
+for j in range(numPart_R):
+    i = numPart_L + j
+    coords[i,0] = offset_R + j * delta_R
+    u[i] = P_R / (rho_R * (gamma - 1.))
+    h[i] = 1.2348 * delta_R
+    m[i] = boxSize * rho_R / (2. * numPart_R)
+    v[i,0] = v_R
+
+# Shift particles
+coords[:,0] -= x_min
+
+u /= (a_beg**(3. * (gamma - 1.)))
+    
+#File
+file = h5py.File(fileName, 'w')
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = boxSize
+grp.attrs["NumPart_Total"] =  [numPart, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = 0
+grp.attrs["Dimension"] = 1
+
+#Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = unit_l_in_cgs
+grp.attrs["Unit mass in cgs (U_M)"] = unit_m_in_cgs
+grp.attrs["Unit time in cgs (U_t)"] = unit_t_in_cgs
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
+#Particle group
+grp = file.create_group("/PartType0")
+grp.create_dataset('Coordinates', data=coords, dtype='d')
+grp.create_dataset('Velocities', data=v, dtype='f')
+grp.create_dataset('Masses', data=m, dtype='f')
+grp.create_dataset('SmoothingLength', data=h, dtype='f')
+grp.create_dataset('InternalEnergy', data=u, dtype='f')
+grp.create_dataset('ParticleIDs', data=ids, dtype='L')
+
+
+file.close()
+
+
diff --git a/examples/Cosmology/ComovingSodShock_1D/plotSolution.py b/examples/Cosmology/ComovingSodShock_1D/plotSolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..95674c04bfafd0cd549b69814df82f9a4f80a949
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_1D/plotSolution.py
@@ -0,0 +1,310 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+# Computes the analytical solution of the Sod shock and plots the SPH answer
+ 
+
+# Generates the analytical  solution for the Sod shock test case
+# The script works for a given left (x<0) and right (x>0) state and computes the solution at a later time t.
+# This follows the solution given in (Toro, 2009)
+
+
+# Parameters
+gas_gamma = 5./3.      # Polytropic index
+rho_L = 1.             # Density left state
+rho_R = 0.125          # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 12,
+'legend.fontsize': 12,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (9.90,6.45),
+'figure.subplot.left'    : 0.045,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.05,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 3.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+
+snap = int(sys.argv[1])
+
+
+# Read the simulation data
+sim = h5py.File("sodShock_%04d.hdf5"%snap, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+anow = sim["/Header"].attrs["Scale-factor"]
+a_i = sim["/Cosmology"].attrs["a_beg"]
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"]
+time = 2. * (1. / np.sqrt(a_i) - 1. / np.sqrt(anow)) / H_0
+scheme = str(sim["/HydroScheme"].attrs["Scheme"])
+kernel = str(sim["/HydroScheme"].attrs["Kernel function"])
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"]
+eta = sim["/HydroScheme"].attrs["Kernel eta"]
+git = str(sim["Code"].attrs["Git Revision"])
+
+x = sim["/PartType0/Coordinates"][:,0]
+v = sim["/PartType0/Velocities"][:,0] * anow
+u = sim["/PartType0/InternalEnergy"][:]
+S = sim["/PartType0/Entropy"][:]
+P = sim["/PartType0/Pressure"][:]
+rho = sim["/PartType0/Density"][:]
+try:
+    alpha = sim["/PartType0/Viscosity"][:]
+    plot_alpha = True 
+except:
+    plot_alpha = False
+
+N = 1000  # Number of points
+x_min = -1.
+x_max = 1.
+
+x += x_min
+
+# ---------------------------------------------------------------
+# Don't touch anything after this.
+# ---------------------------------------------------------------
+
+c_L = sqrt(gas_gamma * P_L / rho_L)   # Speed of the rarefaction wave
+c_R = sqrt(gas_gamma * P_R / rho_R)   # Speed of the shock front
+
+# Helpful variable
+Gama = (gas_gamma - 1.) / (gas_gamma + 1.)
+beta = (gas_gamma - 1.) / (2. * gas_gamma)
+
+# Characteristic function and its derivative, following Toro (2009)
+def compute_f(P_3, P, c):
+    u = P_3 / P
+    if u > 1:
+        term1 = gas_gamma*((gas_gamma+1.)*u + gas_gamma-1.)
+        term2 = sqrt(2./term1)
+        fp = (u - 1.)*c*term2
+        dfdp = c*term2/P + (u - 1.)*c/term2*(-1./term1**2)*gas_gamma*(gas_gamma+1.)/P
+    else:
+        fp = (u**beta - 1.)*(2.*c/(gas_gamma-1.))
+        dfdp = 2.*c/(gas_gamma-1.)*beta*u**(beta-1.)/P
+    return (fp, dfdp)
+
+# Solution of the Riemann problem following Toro (2009) 
+def RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R):
+    P_new = ((c_L + c_R + (v_L - v_R)*0.5*(gas_gamma-1.))/(c_L / P_L**beta + c_R / P_R**beta))**(1./beta)
+    P_3 = 0.5*(P_R + P_L)
+    f_L = 1.
+    while fabs(P_3 - P_new) > 1e-6:
+        P_3 = P_new
+        (f_L, dfdp_L) = compute_f(P_3, P_L, c_L)
+        (f_R, dfdp_R) = compute_f(P_3, P_R, c_R)
+        f = f_L + f_R + (v_R - v_L)
+        df = dfdp_L + dfdp_R
+        dp =  -f/df
+        P_new = P_3 + dp
+    v_3 = v_L - f_L
+    return (P_new, v_3)
+
+
+# Solve Riemann problem for post-shock region
+(P_3, v_3) = RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R)
+
+# Check direction of shocks and wave
+shock_R = (P_3 > P_R)
+shock_L = (P_3 > P_L)
+
+# Velocity of shock front and and rarefaction wave
+if shock_R:
+    v_right = v_R + c_R**2*(P_3/P_R - 1.)/(gas_gamma*(v_3-v_R))
+else:
+    v_right = c_R + 0.5*(gas_gamma+1.)*v_3 - 0.5*(gas_gamma-1.)*v_R
+
+if shock_L:
+    v_left = v_L + c_L**2*(P_3/P_L - 1.)/(gas_gamma*(v_3-v_L))
+else:
+    v_left = c_L - 0.5*(gas_gamma+1.)*v_3 + 0.5*(gas_gamma-1.)*v_L
+
+# Compute position of the transitions
+x_23 = -fabs(v_left) * time
+if shock_L :
+    x_12 = -fabs(v_left) * time
+else:
+    x_12 = -(c_L - v_L) * time
+
+x_34 = v_3 * time
+
+x_45 = fabs(v_right) * time
+if shock_R:
+    x_56 = fabs(v_right) * time
+else:
+    x_56 = (c_R + v_R) * time
+
+
+# Prepare arrays
+delta_x = (x_max - x_min) / N
+x_s = arange(x_min, x_max, delta_x)
+rho_s = zeros(N)
+P_s = zeros(N)
+v_s = zeros(N)
+
+# Compute solution in the different regions
+for i in range(N):
+    if x_s[i] <= x_12:
+        rho_s[i] = rho_L
+        P_s[i] = P_L
+        v_s[i] = v_L
+    if x_s[i] >= x_12 and x_s[i] < x_23:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1. + Gama * P_3/P_L)
+            P_s[i] = P_3
+            v_s[i] = v_3
+        else:
+            rho_s[i] = rho_L*(Gama * (0. - x_s[i])/(c_L * time) + Gama * v_L/c_L + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = P_L*(rho_s[i] / rho_L)**gas_gamma
+            v_s[i] = (1.-Gama)*(c_L -(0. - x_s[i]) / time) + Gama*v_L
+    if x_s[i] >= x_23 and x_s[i] < x_34:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1+Gama * P_3/P_L)
+        else:
+            rho_s[i] = rho_L*(P_3 / P_L)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_34 and x_s[i] < x_45:
+        if shock_R:
+            rho_s[i] = rho_R*(Gama + P_3/P_R)/(1. + Gama * P_3/P_R)
+        else:
+            rho_s[i] = rho_R*(P_3 / P_R)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_45 and x_s[i] < x_56:
+        if shock_R:
+            rho_s[i] = rho_R
+            P_s[i] = P_R
+            v_s[i] = v_R
+        else:
+            rho_s[i] = rho_R*(Gama*(x_s[i])/(c_R*time) - Gama*v_R/c_R + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = P_R*(rho_s[i]/rho_R)**gas_gamma
+            v_s[i] = (1.-Gama)*(-c_R - (-x_s[i])/time) + Gama*v_R
+    if x_s[i] >= x_56:
+        rho_s[i] = rho_R
+        P_s[i] = P_R
+        v_s[i] = v_R
+
+
+# Additional arrays
+u_s = P_s / (rho_s * (gas_gamma - 1.))  #internal energy
+s_s = P_s / rho_s**gas_gamma # entropic function
+        
+
+# Plot the interesting quantities
+figure()
+
+# Velocity profile --------------------------------
+subplot(231)
+plot(x, v, '.', color='r', ms=4.0)
+plot(x_s, v_s, '--', color='k', alpha=0.8, lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Velocity}}~v_x$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(-0.1, 0.95)
+
+# Density profile --------------------------------
+subplot(232)
+plot(x, rho, '.', color='r', ms=4.0)
+plot(x_s, rho_s, '--', color='k', alpha=0.8, lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Density}}~\\rho$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.05, 1.1)
+
+# Pressure profile --------------------------------
+subplot(233)
+plot(x, P, '.', color='r', ms=4.0)
+plot(x_s, P_s, '--', color='k', alpha=0.8, lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Pressure}}~P$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.01, 1.1)
+
+# Internal energy profile -------------------------
+subplot(234)
+plot(x, u, '.', color='r', ms=4.0)
+plot(x_s, u_s, '--', color='k', alpha=0.8, lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Internal~Energy}}~u$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.8, 2.2)
+
+# Entropy/alpha profile ---------------------------------
+subplot(235)
+
+if plot_alpha:
+    plot(x, alpha, '.', color='r', ms=4.0)
+    ylabel(r"${\rm{Viscosity}}~\alpha$", labelpad=0)
+    # Show location of shock
+    plot([x_56, x_56], [-100, 100], color="k", alpha=0.5, ls="dashed", lw=1.2)
+    ylim(0, 1)
+else:
+    plot(x, S, '.', color='r', ms=4.0)
+    plot(x_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+    ylabel("${\\rm{Entropy}}~S$", labelpad=0)
+    ylim(0.8, 3.8)
+
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+xlim(-0.5, 0.5)
+
+# Information -------------------------------------
+subplot(236, frameon=False)
+
+z_now = 1. / anow - 1.
+text(-0.49, 0.9, "Sod shock with  $\\gamma=%.3f$ in 1D at $z=%.2f$"%(gas_gamma,z_now), fontsize=10)
+text(-0.49, 0.8, "Left:~~ $(P_L, \\rho_L, v_L) = (%.3f, %.3f, %.3f)$"%(P_L, rho_L, v_L), fontsize=10)
+text(-0.49, 0.7, "Right: $(P_R, \\rho_R, v_R) = (%.3f, %.3f, %.3f)$"%(P_R, rho_R, v_R), fontsize=10)
+z_i = 1. / a_i - 1.
+text(-0.49, 0.6, "Initial redshift: $%.2f$"%z_i, fontsize=10)
+plot([-0.49, 0.1], [0.52, 0.52], 'k-', lw=1)
+text(-0.49, 0.4, "$\\textsc{Swift}$ %s"%git, fontsize=10)
+text(-0.49, 0.3, scheme, fontsize=10)
+text(-0.49, 0.2, kernel, fontsize=10)
+text(-0.49, 0.1, "$%.2f$ neighbours ($\\eta=%.3f$)"%(neighbours, eta), fontsize=10)
+xlim(-0.5, 0.5)
+ylim(0, 1)
+xticks([])
+yticks([])
+
+tight_layout()
+
+savefig("SodShock.png", dpi=200)
diff --git a/examples/Cosmology/ComovingSodShock_1D/run.sh b/examples/Cosmology/ComovingSodShock_1D/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2eae1729e007ac087654cb7b04d0701542cb4c75
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_1D/run.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e sodShock.hdf5 ]
+then
+    echo "Generating initial conditions for the 1D SodShock example..."
+    python makeIC.py
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --threads=1 sodShock.yml 2>&1 | tee output.log
+
+# Plot the result
+python plotSolution.py 1
diff --git a/examples/Cosmology/ComovingSodShock_1D/sodShock.yml b/examples/Cosmology/ComovingSodShock_1D/sodShock.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2d7a5727cbbc2cd417527ce05d7a8ea8ea05dd71
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_1D/sodShock.yml
@@ -0,0 +1,43 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     2.94e55   # Grams
+  UnitLength_in_cgs:   3.086e18   # pc
+  UnitVelocity_in_cgs: 1.   # cm per s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            sodShock # Common part of the name of output files
+  time_first:          0.       # Time of the first output (in internal units)
+  delta_time:          1.06638      # Time difference between consecutive outputs (in internal units)
+  scale_factor_first:  0.001
+  compression:         1
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02 # Time between statistics output
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
+
+Cosmology:
+  Omega_m: 1.
+  Omega_lambda: 0.
+  Omega_b: 1.
+  h: 1.
+  a_begin: 0.001
+  a_end: 0.00106638
+
diff --git a/examples/SodShockSpherical_2D/getGlass.sh b/examples/Cosmology/ComovingSodShock_2D/getGlass.sh
similarity index 100%
rename from examples/SodShockSpherical_2D/getGlass.sh
rename to examples/Cosmology/ComovingSodShock_2D/getGlass.sh
diff --git a/examples/Cosmology/ComovingSodShock_2D/makeIC.py b/examples/Cosmology/ComovingSodShock_2D/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..51a408866047534f86fbded071d604ec294ed0b7
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_2D/makeIC.py
@@ -0,0 +1,127 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+import h5py
+from numpy import *
+
+# Generates a swift IC file for the 2D Sod Shock in a periodic box
+
+unit_l_in_cgs = 3.086e18
+unit_m_in_cgs = 2.94e55
+unit_t_in_cgs = 3.086e18
+
+# Parameters
+gamma = 5./3.          # Gas adiabatic index
+x_min = -1.
+x_max = 1.
+rho_L = 1.             # Density left state
+rho_R = 0.140625       # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+a_beg = 0.001
+fileName = "sodShock.hdf5" 
+
+
+#---------------------------------------------------
+boxSize = (x_max - x_min)
+
+glass_L = h5py.File("glassPlane_128.hdf5", "r")
+glass_R = h5py.File("glassPlane_48.hdf5", "r")
+
+pos_L = glass_L["/PartType0/Coordinates"][:,:] * 0.5
+pos_R = glass_R["/PartType0/Coordinates"][:,:] * 0.5
+h_L = glass_L["/PartType0/SmoothingLength"][:] * 0.5
+h_R = glass_R["/PartType0/SmoothingLength"][:] * 0.5
+
+# Merge things
+aa = pos_L - array([0.5, 0., 0.])
+pos_LL = append(pos_L, pos_L + array([0.5, 0., 0.]), axis=0)
+pos_RR = append(pos_R, pos_R + array([0.5, 0., 0.]), axis=0)
+pos = append(pos_LL - array([1.0, 0., 0.]), pos_RR, axis=0)
+h_LL = append(h_L, h_L)
+h_RR = append(h_R, h_R)
+h = append(h_LL, h_RR)
+
+numPart_L = size(h_LL)
+numPart_R = size(h_RR)
+numPart = size(h)
+
+vol_L = 0.5
+vol_R = 0.5
+
+# Generate extra arrays
+v = zeros((numPart, 3))
+ids = linspace(1, numPart, numPart)
+m = zeros(numPart)
+u = zeros(numPart)
+
+for i in range(numPart):
+    x = pos[i,0]
+
+    if x < 0: #left
+        u[i] = P_L / (rho_L * (gamma - 1.))
+        m[i] = rho_L * vol_L / numPart_L
+        v[i,0] = v_L
+    else:     #right
+        u[i] = P_R / (rho_R * (gamma - 1.))
+        m[i] = rho_R * vol_R / numPart_R
+        v[i,0] = v_R
+        
+# Shift particles
+pos[:,0] -= x_min
+
+u /= (a_beg**(3. * (gamma - 1.)))
+
+#File
+file = h5py.File(fileName, 'w')
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = [boxSize, 0.5, 1.0]
+grp.attrs["NumPart_Total"] =  [numPart, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = 0
+grp.attrs["Dimension"] = 2
+
+#Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = unit_l_in_cgs
+grp.attrs["Unit mass in cgs (U_M)"] = unit_m_in_cgs
+grp.attrs["Unit time in cgs (U_t)"] = unit_t_in_cgs
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
+#Particle group
+grp = file.create_group("/PartType0")
+grp.create_dataset('Coordinates', data=pos, dtype='d')
+grp.create_dataset('Velocities', data=v, dtype='f')
+grp.create_dataset('Masses', data=m, dtype='f')
+grp.create_dataset('SmoothingLength', data=h, dtype='f')
+grp.create_dataset('InternalEnergy', data=u, dtype='f')
+grp.create_dataset('ParticleIDs', data=ids, dtype='L')
+
+
+file.close()
diff --git a/examples/Cosmology/ComovingSodShock_2D/plotSolution.py b/examples/Cosmology/ComovingSodShock_2D/plotSolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..8adb3cf5c550ab9724f6a8f34c1a1260a25712e1
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_2D/plotSolution.py
@@ -0,0 +1,318 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+# Computes the analytical solution of the Sod shock and plots the SPH answer
+ 
+
+# Generates the analytical  solution for the Sod shock test case
+# The script works for a given left (x<0) and right (x>0) state and computes the solution at a later time t.
+# This follows the solution given in (Toro, 2009)
+
+
+# Parameters
+gas_gamma = 5./3.      # Polytropic index
+rho_L = 1.             # Density left state
+rho_R = 0.140625       # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+from scipy import stats
+import h5py
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 12,
+'legend.fontsize': 12,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (9.90,6.45),
+'figure.subplot.left'    : 0.045,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.05,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 3.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+
+snap = int(sys.argv[1])
+
+
+# Read the simulation data
+sim = h5py.File("sodShock_%04d.hdf5"%snap, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+anow = sim["/Header"].attrs["Scale-factor"]
+a_i = sim["/Cosmology"].attrs["a_beg"]
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"]
+time = 2. * (1. / np.sqrt(a_i) - 1. / np.sqrt(anow)) / H_0
+scheme = sim["/HydroScheme"].attrs["Scheme"]
+kernel = sim["/HydroScheme"].attrs["Kernel function"]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"]
+eta = sim["/HydroScheme"].attrs["Kernel eta"]
+git = sim["Code"].attrs["Git Revision"]
+
+x = sim["/PartType0/Coordinates"][:,0]
+v = sim["/PartType0/Velocities"][:,0] * anow
+u = sim["/PartType0/InternalEnergy"][:]
+S = sim["/PartType0/Entropy"][:]
+P = sim["/PartType0/Pressure"][:]
+rho = sim["/PartType0/Density"][:]
+
+N = 1000  # Number of points
+x_min = -1.
+x_max = 1.
+x += x_min
+
+
+# Bin te data
+x_bin_edge = np.arange(-0.6, 0.6, 0.02)
+x_bin = 0.5*(x_bin_edge[1:] + x_bin_edge[:-1])
+rho_bin,_,_ = stats.binned_statistic(x, rho, statistic='mean', bins=x_bin_edge)
+v_bin,_,_ = stats.binned_statistic(x, v, statistic='mean', bins=x_bin_edge)
+P_bin,_,_ = stats.binned_statistic(x, P, statistic='mean', bins=x_bin_edge)
+S_bin,_,_ = stats.binned_statistic(x, S, statistic='mean', bins=x_bin_edge)
+u_bin,_,_ = stats.binned_statistic(x, u, statistic='mean', bins=x_bin_edge)
+rho2_bin,_,_ = stats.binned_statistic(x, rho**2, statistic='mean', bins=x_bin_edge)
+v2_bin,_,_ = stats.binned_statistic(x, v**2, statistic='mean', bins=x_bin_edge)
+P2_bin,_,_ = stats.binned_statistic(x, P**2, statistic='mean', bins=x_bin_edge)
+S2_bin,_,_ = stats.binned_statistic(x, S**2, statistic='mean', bins=x_bin_edge)
+u2_bin,_,_ = stats.binned_statistic(x, u**2, statistic='mean', bins=x_bin_edge)
+rho_sigma_bin = np.sqrt(rho2_bin - rho_bin**2)
+v_sigma_bin = np.sqrt(v2_bin - v_bin**2)
+P_sigma_bin = np.sqrt(P2_bin - P_bin**2)
+S_sigma_bin = np.sqrt(S2_bin - S_bin**2)
+u_sigma_bin = np.sqrt(u2_bin - u_bin**2)
+
+
+# Analytic solution
+c_L = sqrt(gas_gamma * P_L / rho_L)   # Speed of the rarefaction wave
+c_R = sqrt(gas_gamma * P_R / rho_R)   # Speed of the shock front
+
+# Helpful variable
+Gama = (gas_gamma - 1.) / (gas_gamma + 1.)
+beta = (gas_gamma - 1.) / (2. * gas_gamma)
+
+# Characteristic function and its derivative, following Toro (2009)
+def compute_f(P_3, P, c):
+    u = P_3 / P
+    if u > 1:
+        term1 = gas_gamma*((gas_gamma+1.)*u + gas_gamma-1.)
+        term2 = sqrt(2./term1)
+        fp = (u - 1.)*c*term2
+        dfdp = c*term2/P + (u - 1.)*c/term2*(-1./term1**2)*gas_gamma*(gas_gamma+1.)/P
+    else:
+        fp = (u**beta - 1.)*(2.*c/(gas_gamma-1.))
+        dfdp = 2.*c/(gas_gamma-1.)*beta*u**(beta-1.)/P
+    return (fp, dfdp)
+
+# Solution of the Riemann problem following Toro (2009) 
+def RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R):
+    P_new = ((c_L + c_R + (v_L - v_R)*0.5*(gas_gamma-1.))/(c_L / P_L**beta + c_R / P_R**beta))**(1./beta)
+    P_3 = 0.5*(P_R + P_L)
+    f_L = 1.
+    while fabs(P_3 - P_new) > 1e-6:
+        P_3 = P_new
+        (f_L, dfdp_L) = compute_f(P_3, P_L, c_L)
+        (f_R, dfdp_R) = compute_f(P_3, P_R, c_R)
+        f = f_L + f_R + (v_R - v_L)
+        df = dfdp_L + dfdp_R
+        dp =  -f/df
+        P_new = P_3 + dp
+    v_3 = v_L - f_L
+    return (P_new, v_3)
+
+
+# Solve Riemann problem for post-shock region
+(P_3, v_3) = RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R)
+
+# Check direction of shocks and wave
+shock_R = (P_3 > P_R)
+shock_L = (P_3 > P_L)
+
+# Velocity of shock front and and rarefaction wave
+if shock_R:
+    v_right = v_R + c_R**2*(P_3/P_R - 1.)/(gas_gamma*(v_3-v_R))
+else:
+    v_right = c_R + 0.5*(gas_gamma+1.)*v_3 - 0.5*(gas_gamma-1.)*v_R
+
+if shock_L:
+    v_left = v_L + c_L**2*(P_3/P_L - 1.)/(gas_gamma*(v_3-v_L))
+else:
+    v_left = c_L - 0.5*(gas_gamma+1.)*v_3 + 0.5*(gas_gamma-1.)*v_L
+
+# Compute position of the transitions
+x_23 = -fabs(v_left) * time
+if shock_L :
+    x_12 = -fabs(v_left) * time
+else:
+    x_12 = -(c_L - v_L) * time
+
+x_34 = v_3 * time
+
+x_45 = fabs(v_right) * time
+if shock_R:
+    x_56 = fabs(v_right) * time
+else:
+    x_56 = (c_R + v_R) * time
+
+
+# Prepare arrays
+delta_x = (x_max - x_min) / N
+x_s = arange(x_min, x_max, delta_x)
+rho_s = zeros(N)
+P_s = zeros(N)
+v_s = zeros(N)
+
+# Compute solution in the different regions
+for i in range(N):
+    if x_s[i] <= x_12:
+        rho_s[i] = rho_L
+        P_s[i] = P_L
+        v_s[i] = v_L
+    if x_s[i] >= x_12 and x_s[i] < x_23:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1. + Gama * P_3/P_L)
+            P_s[i] = P_3
+            v_s[i] = v_3
+        else:
+            rho_s[i] = rho_L*(Gama * (0. - x_s[i])/(c_L * time) + Gama * v_L/c_L + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = P_L*(rho_s[i] / rho_L)**gas_gamma
+            v_s[i] = (1.-Gama)*(c_L -(0. - x_s[i]) / time) + Gama*v_L
+    if x_s[i] >= x_23 and x_s[i] < x_34:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1+Gama * P_3/P_L)
+        else:
+            rho_s[i] = rho_L*(P_3 / P_L)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_34 and x_s[i] < x_45:
+        if shock_R:
+            rho_s[i] = rho_R*(Gama + P_3/P_R)/(1. + Gama * P_3/P_R)
+        else:
+            rho_s[i] = rho_R*(P_3 / P_R)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_45 and x_s[i] < x_56:
+        if shock_R:
+            rho_s[i] = rho_R
+            P_s[i] = P_R
+            v_s[i] = v_R
+        else:
+            rho_s[i] = rho_R*(Gama*(x_s[i])/(c_R*time) - Gama*v_R/c_R + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = P_R*(rho_s[i]/rho_R)**gas_gamma
+            v_s[i] = (1.-Gama)*(-c_R - (-x_s[i])/time) + Gama*v_R
+    if x_s[i] >= x_56:
+        rho_s[i] = rho_R
+        P_s[i] = P_R
+        v_s[i] = v_R
+
+
+# Additional arrays
+u_s = P_s / (rho_s * (gas_gamma - 1.))  #internal energy
+s_s = P_s / rho_s**gas_gamma # entropic function
+        
+
+# Plot the interesting quantities
+figure()
+
+# Velocity profile --------------------------------
+subplot(231)
+plot(x, v, '.', color='r', ms=0.2)
+plot(x_s, v_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, v_bin, yerr=v_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Velocity}}~v_x$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(-0.1, 0.95)
+
+# Density profile --------------------------------
+subplot(232)
+plot(x, rho, '.', color='r', ms=0.2)
+plot(x_s, rho_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, rho_bin, yerr=rho_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Density}}~\\rho$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.05, 1.1)
+
+# Pressure profile --------------------------------
+subplot(233)
+plot(x, P, '.', color='r', ms=0.2)
+plot(x_s, P_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, P_bin, yerr=P_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Pressure}}~P$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.01, 1.1)
+
+# Internal energy profile -------------------------
+subplot(234)
+plot(x, u, '.', color='r', ms=0.2)
+plot(x_s, u_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, u_bin, yerr=u_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Internal~Energy}}~u$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.8, 2.2)
+
+# Entropy profile ---------------------------------
+subplot(235)
+plot(x, S, '.', color='r', ms=0.2)
+plot(x_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, S_bin, yerr=S_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Entropy}}~S$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.8, 3.8)
+
+# Information -------------------------------------
+subplot(236, frameon=False)
+
+z_now = 1. / anow - 1.
+text(-0.49, 0.9, "Sod shock with  $\\gamma=%.3f$ in 2D at $z=%.2f$"%(gas_gamma,z_now), fontsize=10)
+text(-0.49, 0.8, "Left:~~ $(P_L, \\rho_L, v_L) = (%.3f, %.3f, %.3f)$"%(P_L, rho_L, v_L), fontsize=10)
+text(-0.49, 0.7, "Right: $(P_R, \\rho_R, v_R) = (%.3f, %.3f, %.3f)$"%(P_R, rho_R, v_R), fontsize=10)
+z_i = 1. / a_i - 1.
+text(-0.49, 0.6, "Initial redshift: $%.2f$"%z_i, fontsize=10)
+plot([-0.49, 0.1], [0.52, 0.52], 'k-', lw=1)
+text(-0.49, 0.4, "$\\textsc{Swift}$ %s"%git, fontsize=10)
+text(-0.49, 0.3, scheme, fontsize=10)
+text(-0.49, 0.2, kernel, fontsize=10)
+text(-0.49, 0.1, "$%.2f$ neighbours ($\\eta=%.3f$)"%(neighbours, eta), fontsize=10)
+xlim(-0.5, 0.5)
+ylim(0, 1)
+xticks([])
+yticks([])
+
+tight_layout()
+savefig("SodShock.png", dpi=200)
diff --git a/examples/Cosmology/ComovingSodShock_2D/run.sh b/examples/Cosmology/ComovingSodShock_2D/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1723153e3eff1f7d49970ed4e3d5f69b39b67a1a
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_2D/run.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e glassPlane_128.hdf5 ]
+then
+    echo "Fetching initial glass file for the Sod shock example..."
+    ./getGlass.sh
+fi
+if [ ! -e sodShock.hdf5 ]
+then
+    echo "Generating initial conditions for the Sod shock example..."
+    python makeIC.py
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --threads=4 sodShock.yml 2>&1 | tee output.log
+
+python plotSolution.py 1
diff --git a/examples/Cosmology/ComovingSodShock_2D/sodShock.yml b/examples/Cosmology/ComovingSodShock_2D/sodShock.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2d7a5727cbbc2cd417527ce05d7a8ea8ea05dd71
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_2D/sodShock.yml
@@ -0,0 +1,43 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     2.94e55   # Grams
+  UnitLength_in_cgs:   3.086e18   # pc
+  UnitVelocity_in_cgs: 1.   # km per s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            sodShock # Common part of the name of output files
+  time_first:          0.       # Time of the first output (in internal units)
+  delta_time:          1.06638      # Time difference between consecutive outputs (in internal units)
+  scale_factor_first:  0.001
+  compression:         1
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02 # Time between statistics output
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
+
+Cosmology:
+  Omega_m: 1.
+  Omega_lambda: 0.
+  Omega_b: 1.
+  h: 1.
+  a_begin: 0.001
+  a_end: 0.00106638
+
diff --git a/examples/Cosmology/ComovingSodShock_3D/README.txt b/examples/Cosmology/ComovingSodShock_3D/README.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2b2f0d16207079f5d29e9ec281fe5255cc5886fb
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_3D/README.txt
@@ -0,0 +1,20 @@
+Cosmological version of the standard Sod shock test.
+
+In the co-moving coordinates that SWIFT uses, the Euler equations of 
+hydrodynamics have an elegant form with a few additional factors that 
+involve the scale factor a. For the specific case of a polytropic index 
+gamma = 5/3, all additional factors are in fact the same: 1/a^2. For 
+this case, hydrodynamics in the co-moving frame is identical to 
+hydrodynamics in a physical non-cosmological frame with a rescaled time 
+variable dt'=a^2*dt.
+
+We choose an Einstein-de Sitter cosmology with H(a)=H_0*a^(3/2) and a 
+box size of 1 pc and rescale the Sod shock initial conditions so that 
+the internal coordinates, density and pressure values are still the same 
+as for the original Sod shock in the non-cosmological case. We then 
+evolve the initial condition from z=999 to z=960.5, which corresponds to 
+a rescaled time interval dt'~0.12. If the co-moving coordinates are 
+implemented correctly, the resulting co-moving density, internal 
+velocity and co-moving pressure profiles should match those for the 
+non-co-moving variables in the ordinary non-cosmological Sod shock at 
+t=0.12.
diff --git a/examples/SodShockSpherical_3D/getGlass.sh b/examples/Cosmology/ComovingSodShock_3D/getGlass.sh
similarity index 100%
rename from examples/SodShockSpherical_3D/getGlass.sh
rename to examples/Cosmology/ComovingSodShock_3D/getGlass.sh
diff --git a/examples/Cosmology/ComovingSodShock_3D/makeIC.py b/examples/Cosmology/ComovingSodShock_3D/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6528bc5ab3670d4423945d194fc537c1bb672a1
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_3D/makeIC.py
@@ -0,0 +1,126 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+import h5py
+from numpy import *
+
+# Generates a swift IC file for the 3D Sod Shock in a periodic box
+
+unit_l_in_cgs = 3.086e18
+unit_m_in_cgs = 2.94e55
+unit_t_in_cgs = 3.086e18
+
+# Parameters
+gamma = 5./3.          # Gas adiabatic index
+x_min = -1.
+x_max = 1.
+rho_L = 1.             # Density left state
+rho_R = 0.125          # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+a_beg = 0.001
+fileName = "sodShock.hdf5" 
+
+
+#---------------------------------------------------
+boxSize = (x_max - x_min)
+
+glass_L = h5py.File("glassCube_64.hdf5", "r")
+glass_R = h5py.File("glassCube_32.hdf5", "r")
+
+pos_L = glass_L["/PartType0/Coordinates"][:,:] * 0.5
+pos_R = glass_R["/PartType0/Coordinates"][:,:] * 0.5
+h_L = glass_L["/PartType0/SmoothingLength"][:] * 0.5
+h_R = glass_R["/PartType0/SmoothingLength"][:] * 0.5
+
+# Merge things
+aa = pos_L - array([0.5, 0., 0.])
+pos_LL = append(pos_L, pos_L + array([0.5, 0., 0.]), axis=0)
+pos_RR = append(pos_R, pos_R + array([0.5, 0., 0.]), axis=0)
+pos = append(pos_LL - array([1.0, 0., 0.]), pos_RR, axis=0)
+h_LL = append(h_L, h_L)
+h_RR = append(h_R, h_R)
+h = append(h_LL, h_RR)
+
+numPart_L = size(h_LL)
+numPart_R = size(h_RR)
+numPart = size(h)
+
+vol_L = (0.25 * boxSize)**2 * (0.5 * boxSize)
+vol_R = (0.25 * boxSize)**2 * (0.5 * boxSize)
+
+# Generate extra arrays
+v = zeros((numPart, 3))
+ids = linspace(1, numPart, numPart)
+m = zeros(numPart)
+u = zeros(numPart)
+
+for i in range(numPart):
+    x = pos[i,0]
+
+    if x < 0: #left
+        u[i] = P_L / (rho_L * (gamma - 1.))
+        m[i] = rho_L * vol_L / numPart_L
+        v[i,0] = v_L
+    else:     #right
+        u[i] = P_R / (rho_R * (gamma - 1.))
+        m[i] = rho_R * vol_R / numPart_R
+        v[i,0] = v_R
+        
+# Shift particles
+pos[:,0] -= x_min
+
+u /= (a_beg**(3. * (gamma - 1.)))
+
+#File
+file = h5py.File(fileName, 'w')
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = [boxSize, 0.25 * boxSize, 0.25 * boxSize]
+grp.attrs["NumPart_Total"] =  [numPart, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = 0
+grp.attrs["Dimension"] = 3
+
+#Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = unit_l_in_cgs
+grp.attrs["Unit mass in cgs (U_M)"] = unit_m_in_cgs
+grp.attrs["Unit time in cgs (U_t)"] = unit_t_in_cgs
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
+#Particle group
+grp = file.create_group("/PartType0")
+grp.create_dataset('Coordinates', data=pos, dtype='d')
+grp.create_dataset('Velocities', data=v, dtype='f')
+grp.create_dataset('Masses', data=m, dtype='f')
+grp.create_dataset('SmoothingLength', data=h, dtype='f')
+grp.create_dataset('InternalEnergy', data=u, dtype='f')
+grp.create_dataset('ParticleIDs', data=ids, dtype='L')
+
+file.close()
diff --git a/examples/Cosmology/ComovingSodShock_3D/plotSolution.py b/examples/Cosmology/ComovingSodShock_3D/plotSolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..f05a385e8620b18189d2e7abca8aebb8ae65060e
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_3D/plotSolution.py
@@ -0,0 +1,316 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ #               2018 Bert Vandenbroucke (bert.vandenbroucke@gmail.com)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+# Computes the analytical solution of the Sod shock and plots the SPH answer
+ 
+
+# Generates the analytical  solution for the Sod shock test case
+# The script works for a given left (x<0) and right (x>0) state and computes the solution at a later time t.
+# This follows the solution given in (Toro, 2009)
+
+
+# Parameters
+gas_gamma = 5./3.      # Polytropic index
+rho_L = 1.             # Density left state
+rho_R = 0.125          # Density right state
+v_L = 0.               # Velocity left state
+v_R = 0.               # Velocity right state
+P_L = 1.               # Pressure left state
+P_R = 0.1              # Pressure right state
+
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+from scipy import stats
+import h5py
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 12,
+'legend.fontsize': 12,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (9.90,6.45),
+'figure.subplot.left'    : 0.045,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.05,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 3.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+
+snap = int(sys.argv[1])
+
+
+# Read the simulation data
+sim = h5py.File("sodShock_%04d.hdf5"%snap, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+anow = sim["/Header"].attrs["Scale-factor"]
+a_i = sim["/Cosmology"].attrs["a_beg"]
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"]
+time = 2. * (1. / np.sqrt(a_i) - 1. / np.sqrt(anow)) / H_0
+scheme = sim["/HydroScheme"].attrs["Scheme"]
+kernel = sim["/HydroScheme"].attrs["Kernel function"]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"]
+eta = sim["/HydroScheme"].attrs["Kernel eta"]
+git = sim["Code"].attrs["Git Revision"]
+
+x = sim["/PartType0/Coordinates"][:,0]
+v = sim["/PartType0/Velocities"][:,0] * anow
+u = sim["/PartType0/InternalEnergy"][:]
+S = sim["/PartType0/Entropy"][:]
+P = sim["/PartType0/Pressure"][:]
+rho = sim["/PartType0/Density"][:]
+
+x_min = -1.
+x_max = 1.
+x += x_min
+N = 1000
+
+# Bin the data
+x_bin_edge = np.arange(-0.6, 0.6, 0.02)
+x_bin = 0.5*(x_bin_edge[1:] + x_bin_edge[:-1])
+rho_bin,_,_ = stats.binned_statistic(x, rho, statistic='mean', bins=x_bin_edge)
+v_bin,_,_ = stats.binned_statistic(x, v, statistic='mean', bins=x_bin_edge)
+P_bin,_,_ = stats.binned_statistic(x, P, statistic='mean', bins=x_bin_edge)
+S_bin,_,_ = stats.binned_statistic(x, S, statistic='mean', bins=x_bin_edge)
+u_bin,_,_ = stats.binned_statistic(x, u, statistic='mean', bins=x_bin_edge)
+rho2_bin,_,_ = stats.binned_statistic(x, rho**2, statistic='mean', bins=x_bin_edge)
+v2_bin,_,_ = stats.binned_statistic(x, v**2, statistic='mean', bins=x_bin_edge)
+P2_bin,_,_ = stats.binned_statistic(x, P**2, statistic='mean', bins=x_bin_edge)
+S2_bin,_,_ = stats.binned_statistic(x, S**2, statistic='mean', bins=x_bin_edge)
+u2_bin,_,_ = stats.binned_statistic(x, u**2, statistic='mean', bins=x_bin_edge)
+rho_sigma_bin = np.sqrt(rho2_bin - rho_bin**2)
+v_sigma_bin = np.sqrt(v2_bin - v_bin**2)
+P_sigma_bin = np.sqrt(P2_bin - P_bin**2)
+S_sigma_bin = np.sqrt(S2_bin - S_bin**2)
+u_sigma_bin = np.sqrt(u2_bin - u_bin**2)
+
+
+# Analytic solution 
+c_L = sqrt(gas_gamma * P_L / rho_L)   # Speed of the rarefaction wave
+c_R = sqrt(gas_gamma * P_R / rho_R)   # Speed of the shock front
+
+# Helpful variable
+Gama = (gas_gamma - 1.) / (gas_gamma + 1.)
+beta = (gas_gamma - 1.) / (2. * gas_gamma)
+
+# Characteristic function and its derivative, following Toro (2009)
+def compute_f(P_3, P, c):
+    u = P_3 / P
+    if u > 1:
+        term1 = gas_gamma*((gas_gamma+1.)*u + gas_gamma-1.)
+        term2 = sqrt(2./term1)
+        fp = (u - 1.)*c*term2
+        dfdp = c*term2/P + (u - 1.)*c/term2*(-1./term1**2)*gas_gamma*(gas_gamma+1.)/P
+    else:
+        fp = (u**beta - 1.)*(2.*c/(gas_gamma-1.))
+        dfdp = 2.*c/(gas_gamma-1.)*beta*u**(beta-1.)/P
+    return (fp, dfdp)
+
+# Solution of the Riemann problem following Toro (2009) 
+def RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R):
+    P_new = ((c_L + c_R + (v_L - v_R)*0.5*(gas_gamma-1.))/(c_L / P_L**beta + c_R / P_R**beta))**(1./beta)
+    P_3 = 0.5*(P_R + P_L)
+    f_L = 1.
+    while fabs(P_3 - P_new) > 1e-6:
+        P_3 = P_new
+        (f_L, dfdp_L) = compute_f(P_3, P_L, c_L)
+        (f_R, dfdp_R) = compute_f(P_3, P_R, c_R)
+        f = f_L + f_R + (v_R - v_L)
+        df = dfdp_L + dfdp_R
+        dp =  -f/df
+        prnew = P_3 + dp
+    v_3 = v_L - f_L
+    return (P_new, v_3)
+
+
+# Solve Riemann problem for post-shock region
+(P_3, v_3) = RiemannProblem(rho_L, P_L, v_L, rho_R, P_R, v_R)
+
+# Check direction of shocks and wave
+shock_R = (P_3 > P_R)
+shock_L = (P_3 > P_L)
+
+# Velocity of shock front and and rarefaction wave
+if shock_R:
+    v_right = v_R + c_R**2*(P_3/P_R - 1.)/(gas_gamma*(v_3-v_R))
+else:
+    v_right = c_R + 0.5*(gas_gamma+1.)*v_3 - 0.5*(gas_gamma-1.)*v_R
+
+if shock_L:
+    v_left = v_L + c_L**2*(P_3/p_L - 1.)/(gas_gamma*(v_3-v_L))
+else:
+    v_left = c_L - 0.5*(gas_gamma+1.)*v_3 + 0.5*(gas_gamma-1.)*v_L
+
+# Compute position of the transitions
+x_23 = -fabs(v_left) * time
+if shock_L :
+    x_12 = -fabs(v_left) * time
+else:
+    x_12 = -(c_L - v_L) * time
+
+x_34 = v_3 * time
+
+x_45 = fabs(v_right) * time
+if shock_R:
+    x_56 = fabs(v_right) * time
+else:
+    x_56 = (c_R + v_R) * time
+
+
+# Prepare arrays
+delta_x = (x_max - x_min) / N
+x_s = arange(x_min, x_max, delta_x)
+rho_s = zeros(N)
+P_s = zeros(N)
+v_s = zeros(N)
+
+# Compute solution in the different regions
+for i in range(N):
+    if x_s[i] <= x_12:
+        rho_s[i] = rho_L
+        P_s[i] = P_L
+        v_s[i] = v_L
+    if x_s[i] >= x_12 and x_s[i] < x_23:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1. + Gama * P_3/P_L)
+            P_s[i] = P_3
+            v_s[i] = v_3
+        else:
+            rho_s[i] = rho_L*(Gama * (0. - x_s[i])/(c_L * time) + Gama * v_L/c_L + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = P_L*(rho_s[i] / rho_L)**gas_gamma
+            v_s[i] = (1.-Gama)*(c_L -(0. - x_s[i]) / time) + Gama*v_L
+    if x_s[i] >= x_23 and x_s[i] < x_34:
+        if shock_L:
+            rho_s[i] = rho_L*(Gama + P_3/P_L)/(1+Gama * P_3/p_L)
+        else:
+            rho_s[i] = rho_L*(P_3 / P_L)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_34 and x_s[i] < x_45:
+        if shock_R:
+            rho_s[i] = rho_R*(Gama + P_3/P_R)/(1. + Gama * P_3/P_R)
+        else:
+            rho_s[i] = rho_R*(P_3 / P_R)**(1./gas_gamma)
+        P_s[i] = P_3
+        v_s[i] = v_3
+    if x_s[i] >= x_45 and x_s[i] < x_56:
+        if shock_R:
+            rho_s[i] = rho_R
+            P_s[i] = P_R
+            v_s[i] = v_R
+        else:
+            rho_s[i] = rho_R*(Gama*(x_s[i])/(c_R*time) - Gama*v_R/c_R + (1.-Gama))**(2./(gas_gamma-1.))
+            P_s[i] = p_R*(rho_s[i]/rho_R)**gas_gamma
+            v_s[i] = (1.-Gama)*(-c_R - (-x_s[i])/time) + Gama*v_R
+    if x_s[i] >= x_56:
+        rho_s[i] = rho_R
+        P_s[i] = P_R
+        v_s[i] = v_R
+
+
+# Additional arrays
+u_s = P_s / (rho_s * (gas_gamma - 1.))  #internal energy
+s_s = P_s / rho_s**gas_gamma # entropic function
+        
+# Plot the interesting quantities
+figure()
+
+# Velocity profile --------------------------------
+subplot(231)
+plot(x, v, '.', color='r', ms=0.5, alpha=0.2)
+plot(x_s, v_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, v_bin, yerr=v_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Velocity}}~v_x$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(-0.1, 0.95)
+
+# Density profile --------------------------------
+subplot(232)
+plot(x, rho, '.', color='r', ms=0.5, alpha=0.2)
+plot(x_s, rho_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, rho_bin, yerr=rho_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Density}}~\\rho$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.05, 1.1)
+
+# Pressure profile --------------------------------
+subplot(233)
+plot(x, P, '.', color='r', ms=0.5, alpha=0.2)
+plot(x_s, P_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, P_bin, yerr=P_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Pressure}}~P$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.01, 1.1)
+
+# Internal energy profile -------------------------
+subplot(234)
+plot(x, u, '.', color='r', ms=0.5, alpha=0.2)
+plot(x_s, u_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, u_bin, yerr=u_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Internal~Energy}}~u$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.8, 2.2)
+
+# Entropy profile ---------------------------------
+subplot(235)
+plot(x, S, '.', color='r', ms=0.5, alpha=0.2)
+plot(x_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+errorbar(x_bin, S_bin, yerr=S_sigma_bin, fmt='.', ms=8.0, color='b', lw=1.2)
+xlabel("${\\rm{Position}}~x$", labelpad=0)
+ylabel("${\\rm{Entropy}}~S$", labelpad=0)
+xlim(-0.5, 0.5)
+ylim(0.8, 3.8)
+
+# Information -------------------------------------
+subplot(236, frameon=False)
+
+znow = 1. / anow - 1.
+text(-0.49, 0.9, "Sod shock with  $\\gamma=%.3f$ in 3D at $z=%.2f$"%(gas_gamma,znow), fontsize=10)
+text(-0.49, 0.8, "Left:~~ $(P_L, \\rho_L, v_L) = (%.3f, %.3f, %.3f)$"%(P_L, rho_L, v_L), fontsize=10)
+text(-0.49, 0.7, "Right: $(P_R, \\rho_R, v_R) = (%.3f, %.3f, %.3f)$"%(P_R, rho_R, v_R), fontsize=10)
+z_i = 1. / a_i - 1.
+text(-0.49, 0.6, "Initial redshift: $%.2f$"%z_i, fontsize = 10)
+plot([-0.49, 0.1], [0.52, 0.52], 'k-', lw=1)
+text(-0.49, 0.4, "$\\textsc{Swift}$ %s"%git, fontsize=10)
+text(-0.49, 0.3, scheme, fontsize=10)
+text(-0.49, 0.2, kernel, fontsize=10)
+text(-0.49, 0.1, "$%.2f$ neighbours ($\\eta=%.3f$)"%(neighbours, eta), fontsize=10)
+xlim(-0.5, 0.5)
+ylim(0, 1)
+xticks([])
+yticks([])
+
+tight_layout()
+savefig("SodShock.png", dpi=200)
diff --git a/examples/Cosmology/ComovingSodShock_3D/run.sh b/examples/Cosmology/ComovingSodShock_3D/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b2ee90ecf8d7eabacc8b2b848b406a233953c9e6
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_3D/run.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e glassCube_64.hdf5 ]
+then
+    echo "Fetching initial glass file for the Sod shock example..."
+    ./getGlass.sh
+fi
+if [ ! -e sodShock.hdf5 ]
+then
+    echo "Generating initial conditions for the Sod shock example..."
+    python makeIC.py
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --threads=4 sodShock.yml 2>&1 | tee output.log
+
+python plotSolution.py 1
diff --git a/examples/Cosmology/ComovingSodShock_3D/sodShock.yml b/examples/Cosmology/ComovingSodShock_3D/sodShock.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2d7a5727cbbc2cd417527ce05d7a8ea8ea05dd71
--- /dev/null
+++ b/examples/Cosmology/ComovingSodShock_3D/sodShock.yml
@@ -0,0 +1,43 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     2.94e55   # Grams
+  UnitLength_in_cgs:   3.086e18   # pc
+  UnitVelocity_in_cgs: 1.   # km per s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            sodShock # Common part of the name of output files
+  time_first:          0.       # Time of the first output (in internal units)
+  delta_time:          1.06638      # Time difference between consecutive outputs (in internal units)
+  scale_factor_first:  0.001
+  compression:         1
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02 # Time between statistics output
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
+
+Cosmology:
+  Omega_m: 1.
+  Omega_lambda: 0.
+  Omega_b: 1.
+  h: 1.
+  a_begin: 0.001
+  a_end: 0.00106638
+
diff --git a/examples/Cosmology/ConstantCosmoVolume/Gadget2/README b/examples/Cosmology/ConstantCosmoVolume/Gadget2/README
new file mode 100644
index 0000000000000000000000000000000000000000..8063a5da1e68b608759d35373e6006d17bf5047e
--- /dev/null
+++ b/examples/Cosmology/ConstantCosmoVolume/Gadget2/README
@@ -0,0 +1,6 @@
+This parameter file can be used to run the exact same example
+with the Gadget-2 code.
+
+The Gadget code has to be compiled with at least the following options:
+ - PERIODIC
+ - HAVE_HDF5
diff --git a/examples/Cosmology/ConstantCosmoVolume/Gadget2/constant_volume.param b/examples/Cosmology/ConstantCosmoVolume/Gadget2/constant_volume.param
new file mode 100644
index 0000000000000000000000000000000000000000..a57e3293ae9dce92743737d42605615d3e365f7a
--- /dev/null
+++ b/examples/Cosmology/ConstantCosmoVolume/Gadget2/constant_volume.param
@@ -0,0 +1,138 @@
+
+% System of units
+
+UnitLength_in_cm         3.08567758e24      %  1.0 Mpc
+UnitMass_in_g            1.98848e43         %  1.0e10 solar masses 
+UnitVelocity_in_cm_per_s 1e5                %  1 km/sec 
+GravityConstantInternal  4.300927e+01       %  Same value as SWIFT
+
+%  Relevant files
+InitCondFile  	   constantBox
+OutputDir          data/
+
+EnergyFile         energy.txt
+InfoFile           info.txt
+TimingsFile        timings.txt
+CpuFile            cpu.txt
+
+RestartFile        restart
+SnapshotFileBase   box
+
+OutputListFilename dummy
+
+% CPU time -limit
+
+TimeLimitCPU      360000  % = 10 hours
+ResubmitOn        0
+ResubmitCommand   my-scriptfile  
+
+
+% Code options
+
+ICFormat                 3
+SnapFormat               3
+ComovingIntegrationOn    1
+
+TypeOfTimestepCriterion  0
+OutputListOn             0
+PeriodicBoundariesOn     1
+
+%  Characteristics of run
+
+TimeBegin             0.00990099 % z = 100
+TimeMax	              1.         % z = 0.
+
+Omega0	              1.0
+OmegaLambda           0.0
+OmegaBaryon           1.0
+HubbleParam           1.0
+BoxSize               64.
+
+% Output frequency
+
+TimeBetSnapshot        1.04
+TimeOfFirstSnapshot    0.00991
+
+CpuTimeBetRestartFile     36000.0    ; here in seconds
+TimeBetStatistics         0.05
+
+NumFilesPerSnapshot       1
+NumFilesWrittenInParallel 1
+
+% Accuracy of time integration
+
+ErrTolIntAccuracy      0.025 
+MaxRMSDisplacementFac  0.25
+CourantFac             0.1     
+MaxSizeTimestep        0.002
+MinSizeTimestep        1e-7
+
+
+% Tree algorithm, force accuracy, domain update frequency
+
+ErrTolTheta            0.3
+TypeOfOpeningCriterion 0
+ErrTolForceAcc         0.005
+
+TreeDomainUpdateFrequency    0.01
+
+%  Further parameters of SPH
+
+DesNumNgb              48
+MaxNumNgbDeviation     1.
+ArtBulkViscConst       0.8
+InitGasTemp            0.        
+MinGasTemp             0.
+
+% Memory allocation
+
+PartAllocFactor       1.6
+TreeAllocFactor       0.8
+BufferSize            30  
+
+% Softening lengths
+
+MinGasHsmlFractional 0.001
+
+SofteningGas       0.08          # 80 kpc / h = 1/25 of mean inter-particle separation
+SofteningHalo      0
+SofteningDisk      0
+SofteningBulge     0           
+SofteningStars     0
+SofteningBndry     0
+
+SofteningGasMaxPhys       0.08   # 80 kpc / h = 1/25 of mean inter-particle separation
+SofteningHaloMaxPhys      0
+SofteningDiskMaxPhys      0
+SofteningBulgeMaxPhys     0           
+SofteningStarsMaxPhys     0
+SofteningBndryMaxPhys     0
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/examples/Cosmology/ConstantCosmoVolume/README b/examples/Cosmology/ConstantCosmoVolume/README
new file mode 100644
index 0000000000000000000000000000000000000000..de84f6909a7c9086603f5d717232d60ff5e312e3
--- /dev/null
+++ b/examples/Cosmology/ConstantCosmoVolume/README
@@ -0,0 +1,7 @@
+This test is a small cosmological volume with constant density and internal energy.
+The ICs are generated from a glass file to minimize the build-up of peculiar velocities
+over time.
+
+The cosmology model is very simple by design. We use Omega_m = 1, Omega_b = 1, h = 1.
+
+The solution script plots the expected solution both in comoving and physical frames.
diff --git a/examples/Cosmology/ConstantCosmoVolume/constant_volume.yml b/examples/Cosmology/ConstantCosmoVolume/constant_volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ebfcc4ffd72121571fa1a69f900985917b440c65
--- /dev/null
+++ b/examples/Cosmology/ConstantCosmoVolume/constant_volume.yml
@@ -0,0 +1,54 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5   	     # 1 km/s
+  UnitCurrent_in_cgs:  1   	     # Amperes
+  UnitTemp_in_cgs:     1   	     # Kelvin
+
+Cosmology:
+  Omega_m: 1.
+  Omega_lambda: 0.
+  Omega_b: 1.
+  h: 1.
+  a_begin: 0.00990099	# z_ini = 100.
+  a_end: 1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-7
+  dt_max:     2e-3
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:	       box
+  delta_time:          1.04
+  scale_factor_first:  0.00991
+  compression:         4
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first:  0.00991
+  delta_time:          1.1
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # "48 ngb" for the 3D cubic spline
+  CFL_condition:         0.1
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./constantBox.hdf5
+  periodic:   1
+  
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+  
+Gravity:
+  mesh_side_length:   32
+  eta: 0.025
+  theta: 0.3
+  comoving_softening: 0.08	# 80 kpc = 1/25 of mean inter-particle separation
+  max_physical_softening: 0.08  # 80 kpc = 1/25 of mean inter-particle separation
+
diff --git a/examples/Cosmology/ConstantCosmoVolume/getGlass.sh b/examples/Cosmology/ConstantCosmoVolume/getGlass.sh
new file mode 100755
index 0000000000000000000000000000000000000000..01b4474ac21666c843b7abedfa39a76948934911
--- /dev/null
+++ b/examples/Cosmology/ConstantCosmoVolume/getGlass.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/gravity_glassCube_32.hdf5
diff --git a/examples/ConstantCosmoVolume/makeIC.py b/examples/Cosmology/ConstantCosmoVolume/makeIC.py
similarity index 87%
rename from examples/ConstantCosmoVolume/makeIC.py
rename to examples/Cosmology/ConstantCosmoVolume/makeIC.py
index 970f197400129d2ca3f3a7b6ff2cfdd5a7f53f3f..d63cb34299017380795b302115ae69b3af22b088 100644
--- a/examples/ConstantCosmoVolume/makeIC.py
+++ b/examples/Cosmology/ConstantCosmoVolume/makeIC.py
@@ -25,7 +25,7 @@ T_i = 100.           # Initial temperature of the gas (in K)
 z_i = 100.           # Initial redshift
 gamma = 5./3.        # Gas adiabatic index
 numPart_1D = 32
-#glassFile = "glassCube_32.hdf5"
+glassFile = "gravity_glassCube_32.hdf5"
 fileName = "constantBox.hdf5"
 
 
@@ -56,17 +56,16 @@ unit_u_in_si = unit_v_in_si**2
 #---------------------------------------------------
 
 # Read the glass file
-#glass = h5py.File(glassFile, "r" )
+glass = h5py.File(glassFile, "r" )
 
 # Read particle positions and h from the glass
-#pos = glass["/PartType0/Coordinates"][:,:]
-#h = glass["/PartType0/SmoothingLength"][:] * 0.3
-#glass.close()
+pos = glass["/PartType1/Coordinates"][:,:]
+glass.close()
 
 # Total number of particles
-#numPart = size(h)
-#if numPart != numPart_1D**3:
-#  print "Non-matching glass file"
+numPart = size(pos)/3
+if numPart != numPart_1D**3:
+  print("Non-matching glass file")
 numPart = numPart_1D**3
 
 # Set box size and interparticle distance
@@ -78,9 +77,7 @@ a_i = 1. / (1. + z_i)
 m_i = boxSize**3 * rho_0 / numPart
 
 # Build the arrays
-#pos *= boxSize
-#h *= boxSize
-coords = zeros((numPart, 3))
+pos *= boxSize
 v = zeros((numPart, 3))
 ids = linspace(1, numPart, numPart)
 m = zeros(numPart)
@@ -92,9 +89,9 @@ for i in range(numPart_1D):
   for j in range(numPart_1D):
     for k in range(numPart_1D):
       index = i * numPart_1D**2 + j * numPart_1D + k
-      coords[index,0] = (i + 0.5) * delta_x
-      coords[index,1] = (j + 0.5) * delta_x
-      coords[index,2] = (k + 0.5) * delta_x
+      #coords[index,0] = (i + 0.5) * delta_x
+      #coords[index,1] = (j + 0.5) * delta_x
+      #coords[index,2] = (k + 0.5) * delta_x
       u[index] = kB_in_SI * T_i / (gamma - 1.) / mH_in_kg
       h[index] = 1.2348 * delta_x
       m[index] = m_i
@@ -103,7 +100,7 @@ for i in range(numPart_1D):
       v[index,2] = 0.
 
 # Unit conversion
-coords /= unit_l_in_si
+pos /= unit_l_in_si
 v /= unit_v_in_si
 m /= unit_m_in_si
 h /= unit_l_in_si
@@ -140,7 +137,7 @@ grp.attrs["Unit temperature in cgs (U_T)"] = 1.
 
 #Particle group
 grp = file.create_group("/PartType0")
-grp.create_dataset('Coordinates', data=coords, dtype='d', compression="gzip", shuffle=True)
+grp.create_dataset('Coordinates', data=pos, dtype='d', compression="gzip", shuffle=True)
 grp.create_dataset('Velocities', data=v, dtype='f',compression="gzip", shuffle=True)
 grp.create_dataset('Masses', data=m, dtype='f', compression="gzip", shuffle=True)
 grp.create_dataset('SmoothingLength', data=h, dtype='f', compression="gzip", shuffle=True)
diff --git a/examples/ConstantCosmoVolume/plotSolution.py b/examples/Cosmology/ConstantCosmoVolume/plotSolution.py
similarity index 100%
rename from examples/ConstantCosmoVolume/plotSolution.py
rename to examples/Cosmology/ConstantCosmoVolume/plotSolution.py
diff --git a/examples/ConstantCosmoVolume/run.sh b/examples/Cosmology/ConstantCosmoVolume/run.sh
similarity index 50%
rename from examples/ConstantCosmoVolume/run.sh
rename to examples/Cosmology/ConstantCosmoVolume/run.sh
index 521659b26d6e4d3c07a8322ba92fa3d52f0ba2cf..4a30410e868aef58b1a9dac0a3225e5844c5873f 100755
--- a/examples/ConstantCosmoVolume/run.sh
+++ b/examples/Cosmology/ConstantCosmoVolume/run.sh
@@ -1,6 +1,11 @@
 #!/bin/bash
 
 # Generate the initial conditions if they are not present.
+if [ ! -e gravity_glassCube_32.hdf5 ]
+then
+    echo "Fetching initial gravity glass file for the constant cosmological box example..."
+    ./getGlass.sh
+fi
 if [ ! -e constantBox.hdf5 ]
 then
     echo "Generating initial conditions for the uniform cosmo box example..."
@@ -8,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -c -G -t 8 constant_volume.yml 2>&1 | tee output.log
+../../swift --hydro --cosmology --self-gravity --threads=8 constant_volume.yml 2>&1 | tee output.log
 
 # Plot the result
 python plotSolution.py $i
diff --git a/examples/ZeldovichPancake_3D/makeIC.py b/examples/Cosmology/ZeldovichPancake_3D/makeIC.py
similarity index 97%
rename from examples/ZeldovichPancake_3D/makeIC.py
rename to examples/Cosmology/ZeldovichPancake_3D/makeIC.py
index 79ed7e71e924941102049b8457fe070ebd08f5c2..efce60f128cacd04e153912d97e0d94b4ab15785 100644
--- a/examples/ZeldovichPancake_3D/makeIC.py
+++ b/examples/Cosmology/ZeldovichPancake_3D/makeIC.py
@@ -123,10 +123,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 100. * unit_l_in_si
diff --git a/examples/ZeldovichPancake_3D/plotSolution.py b/examples/Cosmology/ZeldovichPancake_3D/plotSolution.py
similarity index 91%
rename from examples/ZeldovichPancake_3D/plotSolution.py
rename to examples/Cosmology/ZeldovichPancake_3D/plotSolution.py
index 2a175e346e041a142c6921052ccf13978afa8a38..eef247fb761e75f8dde8e8abe84075efbd7cb46a 100644
--- a/examples/ZeldovichPancake_3D/plotSolution.py
+++ b/examples/Cosmology/ZeldovichPancake_3D/plotSolution.py
@@ -69,6 +69,7 @@ scheme = sim["/HydroScheme"].attrs["Scheme"]
 kernel = sim["/HydroScheme"].attrs["Kernel function"]
 neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"]
 eta = sim["/HydroScheme"].attrs["Kernel eta"]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"]
 git = sim["Code"].attrs["Git Revision"]
 
 # Cosmological parameters
@@ -82,7 +83,12 @@ S = sim["/PartType0/Entropy"][:]
 P = sim["/PartType0/Pressure"][:]
 rho = sim["/PartType0/Density"][:]
 m = sim["/PartType0/Masses"][:]
-phi = sim["/PartType0/Potential"][:]
+try:
+    phi = sim["/PartType0/Potential"][:]
+except KeyError:
+    # We didn't write the potential, try to go on without
+    print("Couldn't find potential in your output file")
+    phi = np.zeros_like(m)
 
 x -= 0.5 * boxSize
 
@@ -96,7 +102,7 @@ if os.path.exists(filename_g):
     rho_g = sim_g["/PartType0/Density"][:]
     phi_g = sim_g["/PartType0/Potential"][:]
     a_g = sim_g["/Header"].attrs["Time"]
-    print "Gadget Scale-factor:", a_g, "redshift:", 1/a_g - 1.
+    print("Gadget Scale-factor:", a_g, "redshift:", 1/a_g - 1.)
     
     x_g -= 0.5 * boxSize
 else:
@@ -167,7 +173,7 @@ u /= a**(3 * (gas_gamma - 1.))
 u_g /= a**(3 * (gas_gamma - 1.))
 T = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
 T_g = (gas_gamma - 1.) * u_g * mH_in_kg / k_in_J_K
-print "z = {0:.2f}, T_avg = {1:.2f}".format(redshift, T.mean())
+print("z = {0:.2f}, T_avg = {1:.2f}".format(redshift, T.mean()))
 if np.size(x_g) > 1:
     plot(x_g, T_g, 's', color='g', alpha=0.8, lw=1.2, ms=4)
 plot(x, T, '.', color='r', ms=4.0)
@@ -178,8 +184,8 @@ ylabel("${\\rm{Temperature}}~T$", labelpad=0)
 # Information -------------------------------------
 subplot(236, frameon=False)
 
-text(-0.49, 0.9, "Zeldovich pancake with  $\\gamma=%.3f$ in 1D at $t=%.2f$"%(gas_gamma,time), fontsize=10)
-text(-0.49, 0.8, "$z={0:.2f}$".format(redshift))
+text(-0.49, 0.9, "Zeldovich pancake at z=%.2f "%(redshift), fontsize=10)
+text(-0.49, 0.8, "adiabatic index $\\gamma=%.2f$, viscosity $\\alpha=%.2f$"%(gas_gamma, alpha), fontsize=10)
 plot([-0.49, 0.1], [0.62, 0.62], 'k-', lw=1)
 text(-0.49, 0.5, "$\\textsc{Swift}$ %s"%git, fontsize=10)
 text(-0.49, 0.4, scheme, fontsize=10)
diff --git a/examples/ZeldovichPancake_3D/run.sh b/examples/Cosmology/ZeldovichPancake_3D/run.sh
similarity index 74%
rename from examples/ZeldovichPancake_3D/run.sh
rename to examples/Cosmology/ZeldovichPancake_3D/run.sh
index b3f802f978377a9615f7cdd1cdd14e85ae3baad2..0be82b2f003143f3a783b2939a4ae932952d02c0 100755
--- a/examples/ZeldovichPancake_3D/run.sh
+++ b/examples/Cosmology/ZeldovichPancake_3D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -c -G -t 8 zeldovichPancake.yml 2>&1 | tee output.log
+../../swift --hydro --cosmology --self-gravity --threads=8 zeldovichPancake.yml 2>&1 | tee output.log
 
 # Plot the result
 for i in {0..119}
diff --git a/examples/ZeldovichPancake_3D/zeldovichPancake.yml b/examples/Cosmology/ZeldovichPancake_3D/zeldovichPancake.yml
similarity index 94%
rename from examples/ZeldovichPancake_3D/zeldovichPancake.yml
rename to examples/Cosmology/ZeldovichPancake_3D/zeldovichPancake.yml
index 5cfa01ff954a959e06076035ae22240bb3c5a120..6a7c5166635b7fa0ed5f69c41461d867c3b254ad 100644
--- a/examples/ZeldovichPancake_3D/zeldovichPancake.yml
+++ b/examples/Cosmology/ZeldovichPancake_3D/zeldovichPancake.yml
@@ -24,7 +24,8 @@ Snapshots:
   basename:            zeldovichPancake # Common part of the name of output files
   time_first:          0.       # Time of the first output (in internal units)
   delta_time:          1.04     # Time difference between consecutive outputs (in internal units)
-  scale_factor_first: 0.00991
+  scale_factor_first:  0.00991
+  compression:         4
 
 # Parameters governing the conserved quantities statistics
 Statistics:
@@ -34,11 +35,13 @@ Statistics:
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation 
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  h_min_ratio:           0.1
 
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./zeldovichPancake.hdf5       # The file to read
-
+  periodic:   1
+  
 Scheduler:
   max_top_level_cells: 8
   cell_split_size:     50
diff --git a/examples/EAGLE_12/getIC.sh b/examples/EAGLE_12/getIC.sh
deleted file mode 100755
index 1983a1c19fbfd67d2a13d7a59847423d217f0e4e..0000000000000000000000000000000000000000
--- a/examples/EAGLE_12/getIC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs_12.hdf5
diff --git a/examples/EAGLE_25/eagle_25.yml b/examples/EAGLE_25/eagle_25.yml
deleted file mode 100644
index d6f9ad2474cb4fc207145c73a1c1c694f2f11386..0000000000000000000000000000000000000000
--- a/examples/EAGLE_25/eagle_25.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-# Define the system of units to use internally. 
-InternalUnitSystem:
-  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
-  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
-  UnitCurrent_in_cgs:  1             # Amperes
-  UnitTemp_in_cgs:     1             # Kelvin
-
-# Structure finding options
-StructureFinding:
-  config_file_name:     stf_input.cfg    # Name of the STF config file.
-  basename:             ./stf         # Common part of the name of output files.
-  output_time_format:   0             # Specifies the frequency format of structure finding. 0 for simulation steps (delta_step) and 1 for simulation time intervals (delta_time).
-  scale_factor_first:   0.92          # Scale-factor of the first snaphot (cosmological run)
-  time_first:           0.01        # Time of the first structure finding output (in internal units).
-  delta_step:           1000          # Time difference between consecutive structure finding outputs (in internal units) in simulation steps.
-  delta_time:           1.10          # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
-
-# Cosmological parameters
-Cosmology:
-  h:              0.6777        # Reduced Hubble constant
-  a_begin:        0.9090909     # Initial scale-factor of the simulation
-  a_end:          1.0           # Final scale factor of the simulation
-  Omega_m:        0.307         # Matter density parameter
-  Omega_lambda:   0.693         # Dark-energy density parameter
-  Omega_b:        0.0455        # Baryon density parameter
-  
-# Parameters governing the time integration
-TimeIntegration:
-  time_begin: 0.    # The starting time of the simulation (in internal units).
-  time_end:   1e-2  # The end time of the simulation (in internal units).
-  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
-
-Scheduler:
-  max_top_level_cells:    16
-
-# Parameters governing the snapshots
-Snapshots:
-  basename:            eagle # Common part of the name of output files
-  scale_factor_first:  0.91  # Scale-factor of the first snaphot (cosmological run)
-  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
-  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
-
-# Parameters governing the conserved quantities statistics
-Statistics:
-  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
-  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
-  delta_time:          1.05 # Time between statistics output
-
-# Parameters for the self-gravity scheme
-Gravity:
-  eta:                    0.025    # Constant dimensionless multiplier for time integration. 
-  theta:                  0.7     # Opening angle (Multipole acceptance criterion)
-  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
-  max_physical_softening: 0.0007    # Physical softening length (in internal units).
-  mesh_side_length:       32
-
-# Parameters for the hydrodynamics scheme
-SPH:
-  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
-  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-  minimal_temperature:   100      # (internal units)
-
-# Parameters related to the initial conditions
-InitialConditions:
-  file_name:  ./EAGLE_ICs_25.hdf5    # The file to read
-  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
-  cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
-
-
diff --git a/examples/EAGLE_25/getIC.sh b/examples/EAGLE_25/getIC.sh
deleted file mode 100755
index 4577db3a351f5b9ce16962897c664cd12108b01c..0000000000000000000000000000000000000000
--- a/examples/EAGLE_25/getIC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs_25.hdf5
diff --git a/examples/EAGLE_50/eagle_50.yml b/examples/EAGLE_50/eagle_50.yml
deleted file mode 100644
index 04c157fa86fc25f90a952e0c216285aa2235cb72..0000000000000000000000000000000000000000
--- a/examples/EAGLE_50/eagle_50.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-# Define the system of units to use internally. 
-InternalUnitSystem:
-  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
-  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
-  UnitCurrent_in_cgs:  1             # Amperes
-  UnitTemp_in_cgs:     1             # Kelvin
-
-# Cosmological parameters
-Cosmology:
-  h:              0.6777        # Reduced Hubble constant
-  a_begin:        0.9090909     # Initial scale-factor of the simulation
-  a_end:          1.0           # Final scale factor of the simulation
-  Omega_m:        0.307         # Matter density parameter
-  Omega_lambda:   0.693         # Dark-energy density parameter
-  Omega_b:        0.0455        # Baryon density parameter
-  
-# Parameters governing the time integration
-TimeIntegration:
-  time_begin: 0.    # The starting time of the simulation (in internal units).
-  time_end:   1e-2  # The end time of the simulation (in internal units).
-  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
-
-Scheduler:
-  max_top_level_cells: 20
-
-# Parameters governing the snapshots
-Snapshots:
-  basename:            eagle # Common part of the name of output files
-  scale_factor_first:  0.91  # Scale-factor of the first snaphot (cosmological run)
-  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
-  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
-
-# Parameters governing the conserved quantities statistics
-Statistics:
-  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
-  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
-  delta_time:          1.05 # Time between statistics output
-
-# Parameters for the self-gravity scheme
-Gravity:
-  eta:                    0.025    # Constant dimensionless multiplier for time integration.
-  theta:                  0.7      # Opening angle (Multipole acceptance criterion)
-  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
-  max_physical_softening: 0.0007    # Physical softening length (in internal units).
-  mesh_side_length:       64
-
-# Parameters for the hydrodynamics scheme
-SPH:
-  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
-  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-  minimal_temperature:   100      # (internal units)
-
-# Parameters related to the initial conditions
-InitialConditions:
-  file_name:  ./EAGLE_ICs_50.hdf5     # The file to read
-  cleanup_h_factors: 1                # Remove the h-factors inherited from Gadget
-  cleanup_velocity_factors: 1         # Remove the sqrt(a) factor in the velocities inherited from Gadget
-
-
diff --git a/examples/EAGLE_50/getIC.sh b/examples/EAGLE_50/getIC.sh
deleted file mode 100755
index f898a02fac4f66f1d186d61a8d48d7b1f81a2af4..0000000000000000000000000000000000000000
--- a/examples/EAGLE_50/getIC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs_50.hdf5
diff --git a/examples/EAGLE_6/getIC.sh b/examples/EAGLE_6/getIC.sh
deleted file mode 100755
index 08daa32a9b708532ab3e78924fb44f7c5dd06795..0000000000000000000000000000000000000000
--- a/examples/EAGLE_6/getIC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs_6.hdf5
diff --git a/examples/EAGLE_DMO_100/README b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/README
similarity index 100%
rename from examples/EAGLE_DMO_100/README
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_100/README
diff --git a/examples/EAGLE_DMO_100/eagle_100.yml b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/eagle_100.yml
similarity index 99%
rename from examples/EAGLE_DMO_100/eagle_100.yml
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_100/eagle_100.yml
index f04c32c8d08b5548c2c710cf8782b39a59c3821e..5a3066195647b79eeb6a6d67d037d15ce8370c39 100644
--- a/examples/EAGLE_DMO_100/eagle_100.yml
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/eagle_100.yml
@@ -49,6 +49,7 @@ Gravity:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  EAGLE_DMO_ICs_100.hdf5
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/EAGLE_DMO_50/getIC.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/getIC.sh
similarity index 70%
rename from examples/EAGLE_DMO_50/getIC.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_100/getIC.sh
index d100cbba766200238ae1d91b9e959f143d25f869..d13bd4fea4ca0a2b3400f7d17b15f6fafd74021f 100755
--- a/examples/EAGLE_DMO_50/getIC.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/getIC.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_ICs_50.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_low_z/EAGLE_DMO_ICs_100.hdf5
diff --git a/examples/EAGLE_DMO_100/run.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/run.sh
similarity index 70%
rename from examples/EAGLE_DMO_100/run.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_100/run.sh
index 642c9247cf4aefa299e8f11c9674d737f4770296..be3cdd8e3cbc4cdbb0d8ab039bbbaa0f8e9ce2a1 100755
--- a/examples/EAGLE_DMO_100/run.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_100/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -G -t 16 eagle_100.yml 2>&1 | tee output.log
+../../swift --cosmology --self-gravity --threads=16 eagle_100.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_DMO_12/README b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/README
similarity index 100%
rename from examples/EAGLE_DMO_12/README
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_12/README
diff --git a/examples/EAGLE_DMO_12/eagle_12.yml b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/eagle_12.yml
similarity index 99%
rename from examples/EAGLE_DMO_12/eagle_12.yml
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_12/eagle_12.yml
index 2354216a5b0dcefe139d6e39699b4c67035a4173..0660d98e87adfae62a2d795efec7ad6509cc1354 100644
--- a/examples/EAGLE_DMO_12/eagle_12.yml
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/eagle_12.yml
@@ -51,6 +51,7 @@ Gravity:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  EAGLE_DMO_ICs_12.hdf5
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/EAGLE_DMO_100/getIC.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/getIC.sh
similarity index 71%
rename from examples/EAGLE_DMO_100/getIC.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_12/getIC.sh
index 22e8be4afe2a9533dd6a25bf057b54cee6bf847b..3b2e613c5e21079a542eeb33758bf0808b84df67 100755
--- a/examples/EAGLE_DMO_100/getIC.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/getIC.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_ICs_100.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_low_z/EAGLE_DMO_ICs_12.hdf5
diff --git a/examples/EAGLE_DMO_12/run.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/run.sh
similarity index 70%
rename from examples/EAGLE_DMO_12/run.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_12/run.sh
index ebf24ee6a5c873d595c58e74a31838eb2d013d92..669fa49823cd65ff336c60964e5e565e925c53c5 100755
--- a/examples/EAGLE_DMO_12/run.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_12/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -G -t 16 eagle_12.yml 2>&1 | tee output.log
+../../swift --cosmology --self-gravity --threads=16 eagle_12.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_DMO_25/README b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/README
similarity index 100%
rename from examples/EAGLE_DMO_25/README
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_25/README
diff --git a/examples/EAGLE_DMO_25/eagle_25.yml b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/eagle_25.yml
similarity index 99%
rename from examples/EAGLE_DMO_25/eagle_25.yml
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_25/eagle_25.yml
index b02f9742a597687d2742b7c2d9eddf836258b06a..558c68ffaad204ebbe1d5781f945f0d95108d227 100644
--- a/examples/EAGLE_DMO_25/eagle_25.yml
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/eagle_25.yml
@@ -50,6 +50,7 @@ Gravity:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  EAGLE_DMO_ICs_25.hdf5
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/EAGLE_DMO_12/getIC.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/getIC.sh
similarity index 71%
rename from examples/EAGLE_DMO_12/getIC.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_25/getIC.sh
index a05321e1b1f4a0c48189a3b9ce05c39549c6fda5..6a796a9d0c1e650a611c3cf7a79606066e02e438 100755
--- a/examples/EAGLE_DMO_12/getIC.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/getIC.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_ICs_12.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_low_z/EAGLE_DMO_ICs_25.hdf5
diff --git a/examples/EAGLE_DMO_25/run.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/run.sh
similarity index 70%
rename from examples/EAGLE_DMO_25/run.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_25/run.sh
index ae0a6d3c49b89239da973c7417530204b4751729..6d96edda655c7bf2d18ab8312417722f78bdb7e0 100755
--- a/examples/EAGLE_DMO_25/run.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_25/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -G -t 16 eagle_25.yml 2>&1 | tee output.log
+../../swift --cosmology --self-gravity --threads=16 eagle_25.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_DMO_50/README b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/README
similarity index 100%
rename from examples/EAGLE_DMO_50/README
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_50/README
diff --git a/examples/EAGLE_DMO_50/eagle_50.yml b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/eagle_50.yml
similarity index 99%
rename from examples/EAGLE_DMO_50/eagle_50.yml
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_50/eagle_50.yml
index 97299df063cd1f611f59a56ccd9b091b1217bef3..3cab2b1dc869b5187cf647caa7893281b783591a 100644
--- a/examples/EAGLE_DMO_50/eagle_50.yml
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/eagle_50.yml
@@ -49,6 +49,7 @@ Gravity:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  EAGLE_DMO_ICs_50.hdf5
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/EAGLE_DMO_25/getIC.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/getIC.sh
similarity index 71%
rename from examples/EAGLE_DMO_25/getIC.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_50/getIC.sh
index 72b08086d77f619b2a89a820557975af8da5ce75..752d58d0096fb3bbbf41ff0ade789f045ff7f90c 100755
--- a/examples/EAGLE_DMO_25/getIC.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/getIC.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_ICs_25.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_DMO_low_z/EAGLE_DMO_ICs_50.hdf5
diff --git a/examples/EAGLE_DMO_50/run.sh b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/run.sh
similarity index 70%
rename from examples/EAGLE_DMO_50/run.sh
rename to examples/EAGLE_DMO_low_z/EAGLE_DMO_50/run.sh
index 31980a5a883e62c972b27a41bbdebe06c7c71539..8a08d0b0408c39f58a53aeff660123f6afc5777b 100755
--- a/examples/EAGLE_DMO_50/run.sh
+++ b/examples/EAGLE_DMO_low_z/EAGLE_DMO_50/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -G -t 16 eagle_50.yml 2>&1 | tee output.log
+../../swift --cosmology --self-gravity --threads=16 eagle_50.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_DMO_low_z/README b/examples/EAGLE_DMO_low_z/README
new file mode 100644
index 0000000000000000000000000000000000000000..a141084b971a2a08592dd4cd42a7f54025b056fc
--- /dev/null
+++ b/examples/EAGLE_DMO_low_z/README
@@ -0,0 +1,3 @@
+This directory contains initial conditions generated from
+the z=0.1 DMONLY snapshots of the EAGLE suite of simulations. They
+are ideal for testing the late-time behaviour of the code.
diff --git a/examples/EAGLE_ICs/EAGLE_12/README b/examples/EAGLE_ICs/EAGLE_12/README
new file mode 100644
index 0000000000000000000000000000000000000000..1c2b4fc1eb0dee875bf187f05e039c943d0b8b84
--- /dev/null
+++ b/examples/EAGLE_ICs/EAGLE_12/README
@@ -0,0 +1,3 @@
+Initial conditions corresponding to the 12.5 Mpc volume
+of the EAGLE suite. The ICs only contain DM particles. The
+gas particles will be generated in SWIFT.
diff --git a/examples/EAGLE_ICs/EAGLE_12/eagle_12.yml b/examples/EAGLE_ICs/EAGLE_12/eagle_12.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4c3d7f0a3516ea7919a7ecd28efa6808c2f0f046
--- /dev/null
+++ b/examples/EAGLE_ICs/EAGLE_12/eagle_12.yml
@@ -0,0 +1,113 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.0078125     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
+  
+# Parameters governing the snapshots
+Snapshots:
+  basename:            eagle # Common part of the name of output files
+  scale_factor_first:  0.05
+  delta_time:          1.02
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1.02
+  scale_factor_first:   0.05
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025     # Constant dimensionless multiplier for time integration.
+  theta:                  0.7       # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       64
+  
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   100.0    # (internal units)
+  initial_temperature:   268.7
+
+Scheduler:
+  max_top_level_cells:   16
+  cell_split_size:       100
+  tasks_per_cell:        5
+
+Restarts:
+  delta_hours:  1.0
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  EAGLE_L0012N0188_ICs.hdf5
+  periodic:   1
+  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
+  cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
+  generate_gas_in_ics: 1             # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths: 1       # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Impose primordial metallicity
+EAGLEChemistry:
+  init_abundance_metal:     0.
+  init_abundance_Hydrogen:  0.752
+  init_abundance_Helium:    0.248
+  init_abundance_Carbon:    0.0
+  init_abundance_Nitrogen:  0.0
+  init_abundance_Oxygen:    0.0
+  init_abundance_Neon:      0.0
+  init_abundance_Magnesium: 0.0
+  init_abundance_Silicon:   0.0
+  init_abundance_Iron:      0.0
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
+
diff --git a/examples/EAGLE_100/getIC.sh b/examples/EAGLE_ICs/EAGLE_12/getIC.sh
similarity index 72%
rename from examples/EAGLE_100/getIC.sh
rename to examples/EAGLE_ICs/EAGLE_12/getIC.sh
index 227df3f9f79d294cd8ccbfd3b72b02dfbea2ebd6..fd4db0384b208f9aecdc4a90084b71ccd2eed444 100755
--- a/examples/EAGLE_100/getIC.sh
+++ b/examples/EAGLE_ICs/EAGLE_12/getIC.sh
@@ -1,2 +1,2 @@
 #!/bin/bash
-wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs_100.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_ICs/EAGLE_L0012N0188_ICs.hdf5
diff --git a/examples/EAGLE_ICs/EAGLE_12/run.sh b/examples/EAGLE_ICs/EAGLE_12/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..12c962c29ddd21290624906dfbcca166e171203b
--- /dev/null
+++ b/examples/EAGLE_ICs/EAGLE_12/run.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e EAGLE_L0012N0188_ICs.hdf5 ]
+then
+    echo "Fetching initial conditions for the EAGLE 12Mpc example..."
+    ./getIC.sh
+fi
+
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_12.yml 2>&1 | tee output.log
+
diff --git a/examples/EAGLE_ICs/README b/examples/EAGLE_ICs/README
new file mode 100644
index 0000000000000000000000000000000000000000..3a44c6fc97c8e1759479561780960d19df43c97f
--- /dev/null
+++ b/examples/EAGLE_ICs/README
@@ -0,0 +1,4 @@
+This directory contains initial conditions generated for
+the EAGLE suite of simulations. The cosmology, resolution
+and phases are the same as used in the original suite. The only
+difference is the file format, adapted for SWIFT.
diff --git a/examples/EAGLE_100/README b/examples/EAGLE_low_z/EAGLE_100/README
similarity index 100%
rename from examples/EAGLE_100/README
rename to examples/EAGLE_low_z/EAGLE_100/README
diff --git a/examples/EAGLE_100/eagle_100.yml b/examples/EAGLE_low_z/EAGLE_100/eagle_100.yml
similarity index 95%
rename from examples/EAGLE_100/eagle_100.yml
rename to examples/EAGLE_low_z/EAGLE_100/eagle_100.yml
index 439bb7eb6dc5d460752771addc83c89e27f69b7f..5275f709c710d2da1d5396e98f1d4d918e482c6d 100644
--- a/examples/EAGLE_100/eagle_100.yml
+++ b/examples/EAGLE_low_z/EAGLE_100/eagle_100.yml
@@ -23,7 +23,7 @@ TimeIntegration:
   dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
 
 Scheduler:
-  max_top_level_cells: 80
+  max_top_level_cells: 32
 
 # Parameters governing the snapshots
 Snapshots:
@@ -44,16 +44,19 @@ Gravity:
   theta:                  0.85      # Opening angle (Multipole acceptance criterion)
   comoving_softening:     0.0026994 # Comoving softening length (in internal units).
   max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       256
 
 # Parameters for the hydrodynamics scheme
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
   minimal_temperature:   100      # (internal units)
 
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./EAGLE_ICs_100.hdf5   # The file to read
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/EAGLE_low_z/EAGLE_100/getIC.sh b/examples/EAGLE_low_z/EAGLE_100/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..36053deb5ff80c2ddf740b2379d5d145d091466c
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_100/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_low_z/EAGLE_ICs_100.hdf5
diff --git a/examples/EAGLE_100/run.sh b/examples/EAGLE_low_z/EAGLE_100/run.sh
similarity index 65%
rename from examples/EAGLE_100/run.sh
rename to examples/EAGLE_low_z/EAGLE_100/run.sh
index 9c990a902a6350eff348aad40c482723d1ba954c..28571d4803cb8c26bf67d84870c10a2e7dcf534c 100755
--- a/examples/EAGLE_100/run.sh
+++ b/examples/EAGLE_low_z/EAGLE_100/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -s -G -S -t 16 eagle_100.yml 2>&1 | tee output.log
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_100.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_12/README b/examples/EAGLE_low_z/EAGLE_12/README
similarity index 100%
rename from examples/EAGLE_12/README
rename to examples/EAGLE_low_z/EAGLE_12/README
diff --git a/examples/EAGLE_low_z/EAGLE_12/eagle_12.yml b/examples/EAGLE_low_z/EAGLE_12/eagle_12.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d09bbb51e90d843dd6731c5fcbd48b9c586713f9
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_12/eagle_12.yml
@@ -0,0 +1,106 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   1e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
+  
+# Parameters governing the snapshots
+Snapshots:
+  basename:            eagle # Common part of the name of output files
+  scale_factor_first:  0.91  # Scale-factor of the first snapshot (cosmological run)
+  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
+  compression:         1
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
+  delta_time:          1.05 # Time between statistics output
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025     # Constant dimensionless multiplier for time integration.
+  theta:                  0.7       # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       32
+  
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   100      # (internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./EAGLE_ICs_12.hdf5    # The file to read
+  periodic:   1
+  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
+  cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
+  
+EAGLEChemistry: 	     # Solar abundances
+  init_abundance_metal:      0.014
+  init_abundance_Hydrogen:   0.70649785
+  init_abundance_Helium:     0.28055534
+  init_abundance_Carbon:     2.0665436e-3
+  init_abundance_Nitrogen:   8.3562563e-4
+  init_abundance_Oxygen:     5.4926244e-3
+  init_abundance_Neon:       1.4144605e-3
+  init_abundance_Magnesium:  5.907064e-4
+  init_abundance_Silicon:    6.825874e-4
+  init_abundance_Iron:       1.1032152e-3
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.3       # The gas fraction used internally by the model.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
diff --git a/examples/EAGLE_low_z/EAGLE_12/getIC.sh b/examples/EAGLE_low_z/EAGLE_12/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..18bac13c4fb0006cf9da00d3dd869693ef0a589d
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_12/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_low_z/EAGLE_ICs_12.hdf5
diff --git a/examples/EAGLE_12/run.sh b/examples/EAGLE_low_z/EAGLE_12/run.sh
similarity index 65%
rename from examples/EAGLE_12/run.sh
rename to examples/EAGLE_low_z/EAGLE_12/run.sh
index 67f1c24a1ead927823b9240cdeb718b35580d573..bceddf338ae797abcc32c24fb2642320d9091ba9 100755
--- a/examples/EAGLE_12/run.sh
+++ b/examples/EAGLE_low_z/EAGLE_12/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -s -G -S -t 16 eagle_12.yml 2>&1 | tee output.log
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_12.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_25/README b/examples/EAGLE_low_z/EAGLE_25/README
similarity index 100%
rename from examples/EAGLE_25/README
rename to examples/EAGLE_low_z/EAGLE_25/README
diff --git a/examples/EAGLE_low_z/EAGLE_25/eagle_25.yml b/examples/EAGLE_low_z/EAGLE_25/eagle_25.yml
new file mode 100644
index 0000000000000000000000000000000000000000..75799647b4e95ebd75202748c67a2c18c423f532
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_25/eagle_25.yml
@@ -0,0 +1,113 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Structure finding options
+StructureFinding:
+  config_file_name:     stf_input.cfg    # Name of the STF config file.
+  basename:             ./stf         # Common part of the name of output files.
+  scale_factor_first:   0.92          # Scale-factor of the first snapshot (cosmological run)
+  time_first:           0.01        # Time of the first structure finding output (in internal units).
+  delta_time:           1.10          # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+  
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   1e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            eagle # Common part of the name of output files
+  scale_factor_first:  0.91  # Scale-factor of the first snapshot (cosmological run)
+  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
+  delta_time:          1.05 # Time between statistics output
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025    # Constant dimensionless multiplier for time integration. 
+  theta:                  0.7     # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       64
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   100      # (internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./EAGLE_ICs_25.hdf5    # The file to read
+  periodic:   1
+  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
+  cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
+
+EAGLEChemistry: 	     # Solar abundances
+  init_abundance_metal:      0.014
+  init_abundance_Hydrogen:   0.70649785
+  init_abundance_Helium:     0.28055534
+  init_abundance_Carbon:     2.0665436e-3
+  init_abundance_Nitrogen:   8.3562563e-4
+  init_abundance_Oxygen:     5.4926244e-3
+  init_abundance_Neon:       1.4144605e-3
+  init_abundance_Magnesium:  5.907064e-4
+  init_abundance_Silicon:    6.825874e-4
+  init_abundance_Iron:       1.1032152e-3
+  
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.3       # The gas fraction used internally by the model.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
diff --git a/examples/EAGLE_low_z/EAGLE_25/getIC.sh b/examples/EAGLE_low_z/EAGLE_25/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9f884b57aa7d9e3980237a652d2adaa28c51be68
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_25/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_low_z/EAGLE_ICs_25.hdf5
diff --git a/examples/EAGLE_25/run.sh b/examples/EAGLE_low_z/EAGLE_25/run.sh
similarity index 65%
rename from examples/EAGLE_25/run.sh
rename to examples/EAGLE_low_z/EAGLE_25/run.sh
index 0b6cf77d7b2461864fc24055811ee00c7dd00613..ea14dbde3293bb28f98de29eb035d53bc7caa1e6 100755
--- a/examples/EAGLE_25/run.sh
+++ b/examples/EAGLE_low_z/EAGLE_25/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -s -G -S -t 16 eagle_25.yml 2>&1 | tee output.log
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_25.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_50/README b/examples/EAGLE_low_z/EAGLE_50/README
similarity index 100%
rename from examples/EAGLE_50/README
rename to examples/EAGLE_low_z/EAGLE_50/README
diff --git a/examples/EAGLE_low_z/EAGLE_50/eagle_50.yml b/examples/EAGLE_low_z/EAGLE_50/eagle_50.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6c0c7421ba4f804437a8086b42fb2878bd3904b1
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_50/eagle_50.yml
@@ -0,0 +1,108 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+  
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   1e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
+
+Scheduler:
+  max_top_level_cells: 16
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            eagle # Common part of the name of output files
+  scale_factor_first:  0.91  # Scale-factor of the first snapshot (cosmological run)
+  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
+  delta_time:          1.05 # Time between statistics output
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025    # Constant dimensionless multiplier for time integration.
+  theta:                  0.7      # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       128
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   100      # (internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./EAGLE_ICs_50.hdf5     # The file to read
+  periodic:   1
+  cleanup_h_factors: 1                # Remove the h-factors inherited from Gadget
+  cleanup_velocity_factors: 1         # Remove the sqrt(a) factor in the velocities inherited from Gadget
+
+EAGLEChemistry: 	     # Solar abundances
+  init_abundance_metal:      0.014
+  init_abundance_Hydrogen:   0.70649785
+  init_abundance_Helium:     0.28055534
+  init_abundance_Carbon:     2.0665436e-3
+  init_abundance_Nitrogen:   8.3562563e-4
+  init_abundance_Oxygen:     5.4926244e-3
+  init_abundance_Neon:       1.4144605e-3
+  init_abundance_Magnesium:  5.907064e-4
+  init_abundance_Silicon:    6.825874e-4
+  init_abundance_Iron:       1.1032152e-3
+  
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.3       # The gas fraction used internally by the model.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
diff --git a/examples/EAGLE_low_z/EAGLE_50/getIC.sh b/examples/EAGLE_low_z/EAGLE_50/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..09154fb9b8933df8eaec368a2026a14704ddeca0
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_50/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_low_z/EAGLE_ICs_50.hdf5
diff --git a/examples/EAGLE_50/run.sh b/examples/EAGLE_low_z/EAGLE_50/run.sh
similarity index 65%
rename from examples/EAGLE_50/run.sh
rename to examples/EAGLE_low_z/EAGLE_50/run.sh
index a0d5dee11dc58e8d19d4d0e551c5ad8eceb90548..e2f2836900bacf612acb289154de973cda90eccb 100755
--- a/examples/EAGLE_50/run.sh
+++ b/examples/EAGLE_low_z/EAGLE_50/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -s -G -S -t 16 eagle_50.yml 2>&1 | tee output.log
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_50.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_6/README b/examples/EAGLE_low_z/EAGLE_6/README
similarity index 100%
rename from examples/EAGLE_6/README
rename to examples/EAGLE_low_z/EAGLE_6/README
diff --git a/examples/EAGLE_low_z/EAGLE_6/eagle_6.yml b/examples/EAGLE_low_z/EAGLE_6/eagle_6.yml
new file mode 100644
index 0000000000000000000000000000000000000000..313a5a384324eecd56455ea22bbc96d147982d6b
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_6/eagle_6.yml
@@ -0,0 +1,116 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Structure finding options
+StructureFinding:
+  config_file_name:     stf_input.cfg # Name of the STF config file.
+  basename:             ./stf         # Common part of the name of output files.
+  scale_factor_first:   0.92          # Scale-factor of the first snapshot (cosmological run)
+  time_first:           0.01        # Time of the first structure finding output (in internal units).
+  delta_time:           1.10          # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+  
+Scheduler:
+  max_top_level_cells:    8
+  
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   1e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
+  
+# Parameters governing the snapshots
+Snapshots:
+  basename:            eagle # Common part of the name of output files
+  scale_factor_first:  0.91  # Scale-factor of the first snapshot (cosmological run)
+  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
+  compression:         1
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
+  delta_time:          1.05 # Time between statistics output
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025    # Constant dimensionless multiplier for time integration.
+  theta:                  0.7      # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  mesh_side_length:       16
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  minimal_temperature:   100      # (internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./EAGLE_ICs_6.hdf5     # The file to read
+  periodic:   1
+  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
+  cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
+
+EAGLEChemistry: 	     # Solar abundances
+  init_abundance_metal:      0.014
+  init_abundance_Hydrogen:   0.70649785
+  init_abundance_Helium:     0.28055534
+  init_abundance_Carbon:     2.0665436e-3
+  init_abundance_Nitrogen:   8.3562563e-4
+  init_abundance_Oxygen:     5.4926244e-3
+  init_abundance_Neon:       1.4144605e-3
+  init_abundance_Magnesium:  5.907064e-4
+  init_abundance_Silicon:    6.825874e-4
+  init_abundance_Iron:       1.1032152e-3
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.3       # The gas fraction used internally by the model.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependent density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependent threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependent star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependent density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
diff --git a/examples/EAGLE_low_z/EAGLE_6/getIC.sh b/examples/EAGLE_low_z/EAGLE_6/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c1643f1b7e2550f80cc286ad9b8a09020d63f07f
--- /dev/null
+++ b/examples/EAGLE_low_z/EAGLE_6/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/EAGLE_low_z/EAGLE_ICs_6.hdf5
diff --git a/examples/EAGLE_6/run.sh b/examples/EAGLE_low_z/EAGLE_6/run.sh
similarity index 65%
rename from examples/EAGLE_6/run.sh
rename to examples/EAGLE_low_z/EAGLE_6/run.sh
index 7ef3fc2abdd1bb3fed1a228bf993bf09fb13f42c..47dbd952549137d8c2baab8c22361a227eb35ca9 100755
--- a/examples/EAGLE_6/run.sh
+++ b/examples/EAGLE_low_z/EAGLE_6/run.sh
@@ -7,5 +7,5 @@ then
     ./getIC.sh
 fi
 
-../swift -c -s -G -S -t 16 eagle_6.yml 2>&1 | tee output.log
+../../swift --cosmology --hydro --self-gravity --stars --threads=16 eagle_6.yml 2>&1 | tee output.log
 
diff --git a/examples/EAGLE_6/testVELOCIraptor.sh b/examples/EAGLE_low_z/EAGLE_6/testVELOCIraptor.sh
similarity index 76%
rename from examples/EAGLE_6/testVELOCIraptor.sh
rename to examples/EAGLE_low_z/EAGLE_6/testVELOCIraptor.sh
index 14ec30487006f0b7e86356837c9a801950c15c83..3f0ae1d6f0da9736b867f53b898752efbfd50324 100755
--- a/examples/EAGLE_6/testVELOCIraptor.sh
+++ b/examples/EAGLE_low_z/EAGLE_6/testVELOCIraptor.sh
@@ -36,8 +36,8 @@ if [ "$RUN_DM" = "1" ]; then
     rm $VEL_OUTPUT/vel_$TEST*
 
     # Run test using SWIFT + VELOCIraptor
-    echo "Running: mpirun -np $NUM_MPI_PROC ../swift_mpi -G -t 8 eagle_6.yml -x -n 5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_dmonly"
-    mpirun -np $NUM_MPI_PROC ../swift_mpi -G -t 8 eagle_6.yml -x -n 5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_dmonly
+    echo "Running: mpirun -np $NUM_MPI_PROC ../swift_mpi --self-gravity --threads=8 eagle_6.yml --velociraptor --steps=5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_dmonly"
+    mpirun -np $NUM_MPI_PROC ../swift_mpi --self-gravity --threads=8 eagle_6.yml --velociraptor --steps=5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_dmonly
 
     # Run test using VELOCIraptor
     echo "Running: mpirun -np $NUM_MPI_PROC $VELOCIRAPTOR_PATH/bin/stf-gas -I 2 -i eagle_dmonly_0000 -C $VELOCIRAPTOR_PATH/stf_input_$TEST.cfg -o ./$VEL_OUTPUT/vel_$TEST"
@@ -80,8 +80,8 @@ if [ "$RUN_GAS" = "1" ]; then
     rm $VEL_OUTPUT/vel_$TEST*
 
     # Run test using SWIFT + VELOCIraptor
-    echo "Running: mpirun -np $NUM_MPI_PROC ../swift_mpi -s -G -t 8 eagle_6.yml -x -n 5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_gas"
-    mpirun -np $NUM_MPI_PROC ../swift_mpi -s -G -t 8 eagle_6.yml -x -n 5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_gas
+    echo "Running: mpirun -np $NUM_MPI_PROC ../swift_mpi --hydro --self-gravity --threads=8 eagle_6.yml --velociraptor --steps=5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_gas"
+    mpirun -np $NUM_MPI_PROC ../swift_mpi --hydro --self-gravity --threads=8 eagle_6.yml --velociraptor --steps=5 -P StructureFinding:basename:./$OUTPUT/stf -P StructureFinding:config_file_name:./stf_input_$TEST.cfg -P Snapshots:basename:./eagle_gas
 
     # Run test using VELOCIraptor
     echo "Running: mpirun -np $NUM_MPI_PROC $VELOCIRAPTOR_PATH/bin/stf-gas -I 2 -i eagle_gas_0000 -C ./stf_input_$TEST.cfg -o ./$VEL_OUTPUT/vel_$TEST"
diff --git a/examples/EAGLE_low_z/README b/examples/EAGLE_low_z/README
new file mode 100644
index 0000000000000000000000000000000000000000..d17b8bec79adc7ebd8f9db2114a8e290a9908548
--- /dev/null
+++ b/examples/EAGLE_low_z/README
@@ -0,0 +1,3 @@
+This directory contains initial conditions generated from
+the z=0.1 snapshots of the EAGLE suite of simulations. They
+are ideal for testing the late-time behaviour of the code.
diff --git a/examples/AgoraDisk/agora_disk.yml b/examples/GEAR/AgoraDisk/agora_disk.yml
similarity index 84%
rename from examples/AgoraDisk/agora_disk.yml
rename to examples/GEAR/AgoraDisk/agora_disk.yml
index 7368700d8a2a5ca8de7d677e1da78be51d669835..92f2532b3132c0f6314b7697f0b9b65f1afedb3b 100644
--- a/examples/AgoraDisk/agora_disk.yml
+++ b/examples/GEAR/AgoraDisk/agora_disk.yml
@@ -39,20 +39,18 @@ Gravity:
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-  minimal_temperature:   10      # (internal units)
+  minimal_temperature:   10.      # Kelvin
 
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./agora_disk.hdf5     # The file to read
-  cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
-  shift:    [674.1175, 674.1175, 674.1175]   # (Optional) A shift to apply to all particles read from the ICs (in internal units).
+  periodic:   0                     # Non-periodic BCs
+  cleanup_h_factors: 1              # Remove the h-factors inherited from Gadget
+  shift:    [674.1175, 674.1175, 674.1175]   # Centre the box
 
 # Dimensionless pre-factor for the time-step condition
 LambdaCooling:
-  lambda_cgs:                  1.0e-22    # Cooling rate (in cgs units)
-  minimum_temperature:         1.0e2      # Minimal temperature (Kelvin)
-  mean_molecular_weight:       0.59       # Mean molecular weight
-  hydrogen_mass_abundance:     0.75       # Hydrogen mass abundance (dimensionless)
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
   cooling_tstep_mult:          1.0        # Dimensionless pre-factor for the time-step condition
 
 # Cooling with Grackle 2.0
diff --git a/examples/AgoraDisk/changeType.py b/examples/GEAR/AgoraDisk/changeType.py
similarity index 100%
rename from examples/AgoraDisk/changeType.py
rename to examples/GEAR/AgoraDisk/changeType.py
diff --git a/examples/AgoraDisk/cleanupSwift.py b/examples/GEAR/AgoraDisk/cleanupSwift.py
similarity index 100%
rename from examples/AgoraDisk/cleanupSwift.py
rename to examples/GEAR/AgoraDisk/cleanupSwift.py
diff --git a/examples/AgoraDisk/getIC.sh b/examples/GEAR/AgoraDisk/getIC.sh
old mode 100644
new mode 100755
similarity index 72%
rename from examples/AgoraDisk/getIC.sh
rename to examples/GEAR/AgoraDisk/getIC.sh
index 620a751bedaf6c646119247270fad6dd3f740fde..c234b52b943ccb8d6dededed7d0f5070cd9fe5b2
--- a/examples/AgoraDisk/getIC.sh
+++ b/examples/GEAR/AgoraDisk/getIC.sh
@@ -6,4 +6,4 @@ if [ "$#" -ne 1 ]; then
     exit
 fi
 
-wget https://obswww.unige.ch/~lhausamm/swift/IC/AgoraDisk/$1
+wget https://obswww.unige.ch/~lhausamm/swift/IC/AgoraDisk/$1.hdf5
diff --git a/examples/AgoraDisk/getSolution.sh b/examples/GEAR/AgoraDisk/getSolution.sh
old mode 100644
new mode 100755
similarity index 100%
rename from examples/AgoraDisk/getSolution.sh
rename to examples/GEAR/AgoraDisk/getSolution.sh
diff --git a/examples/AgoraDisk/plotSolution.py b/examples/GEAR/AgoraDisk/plotSolution.py
similarity index 100%
rename from examples/AgoraDisk/plotSolution.py
rename to examples/GEAR/AgoraDisk/plotSolution.py
diff --git a/examples/AgoraDisk/run.sh b/examples/GEAR/AgoraDisk/run.sh
old mode 100644
new mode 100755
similarity index 92%
rename from examples/AgoraDisk/run.sh
rename to examples/GEAR/AgoraDisk/run.sh
index d7e284db52c2e6750fd713b3607a7f423bac7769..5b85be7df875cee69b513e36a327b1469e35b60e
--- a/examples/AgoraDisk/run.sh
+++ b/examples/GEAR/AgoraDisk/run.sh
@@ -38,7 +38,7 @@ cp $sim.hdf5 agora_disk.hdf5
 python3 changeType.py agora_disk.hdf5
 
 # Run SWIFT
-#../swift $flag -s -G -t 4 agora_disk.yml 2>&1 | tee output.log
+#../../swift $flag --hydro --self-gravity --threads=4 agora_disk.yml 2>&1 | tee output.log
 
 
 echo "Changing smoothing length to be Gadget compatible"
diff --git a/examples/GEAR/DwarfGalaxy/README b/examples/GEAR/DwarfGalaxy/README
new file mode 100644
index 0000000000000000000000000000000000000000..7a9167694a24c088997316180233b28b9126f298
--- /dev/null
+++ b/examples/GEAR/DwarfGalaxy/README
@@ -0,0 +1,7 @@
+This example is a galaxy extracted from the example "ZoomIn". It allows
+testing SWIFT on a smaller problem. See the README in "ZoomIn" for more
+information.
+
+
+MD5 check-sum of the ICs:
+ae2af84d88f30011b6a8af3f37d140cf  dwarf_galaxy.hdf5
\ No newline at end of file
diff --git a/examples/EAGLE_6/eagle_6.yml b/examples/GEAR/DwarfGalaxy/dwarf_galaxy.yml
similarity index 62%
rename from examples/EAGLE_6/eagle_6.yml
rename to examples/GEAR/DwarfGalaxy/dwarf_galaxy.yml
index eb374df964e8b021ef2b7d90caf8a1824cf3a833..00fd889d4f58a4dadccc52ef5c6d8315ac2a4012 100644
--- a/examples/EAGLE_6/eagle_6.yml
+++ b/examples/GEAR/DwarfGalaxy/dwarf_galaxy.yml
@@ -1,7 +1,7 @@
 # Define the system of units to use internally. 
 InternalUnitSystem:
   UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitLength_in_cgs:   3.08567758e21 # kpc in centimeters
   UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
   UnitCurrent_in_cgs:  1             # Amperes
   UnitTemp_in_cgs:     1             # Kelvin
@@ -10,42 +10,40 @@ InternalUnitSystem:
 StructureFinding:
   config_file_name:     stf_input.cfg # Name of the STF config file.
   basename:             ./stf         # Common part of the name of output files.
-  output_time_format:   0             # Specifies the frequency format of structure finding. 0 for simulation steps (delta_step) and 1 for simulation time intervals (delta_time).
   scale_factor_first:   0.92          # Scale-factor of the first snaphot (cosmological run)
   time_first:           0.01        # Time of the first structure finding output (in internal units).
-  delta_step:           1000          # Time difference between consecutive structure finding outputs (in internal units) in simulation steps.
   delta_time:           1.10          # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
 
 # Cosmological parameters
 Cosmology:
-  h:              0.6777        # Reduced Hubble constant
-  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  h:              0.673        # Reduced Hubble constant
+  a_begin:        0.9873046739     # Initial scale-factor of the simulation
   a_end:          1.0           # Final scale factor of the simulation
-  Omega_m:        0.307         # Matter density parameter
-  Omega_lambda:   0.693         # Dark-energy density parameter
-  Omega_b:        0.0455        # Baryon density parameter
+  Omega_m:        0.315         # Matter density parameter
+  Omega_lambda:   0.685         # Dark-energy density parameter
+  Omega_b:        0.0486        # Baryon density parameter
   
 Scheduler:
   max_top_level_cells:    8
+  cell_split_size:           400       # (Optional) Maximal number of particles per cell (this is the default value).
   
 # Parameters governing the time integration
 TimeIntegration:
   time_begin: 0.    # The starting time of the simulation (in internal units).
-  time_end:   1e-2  # The end time of the simulation (in internal units).
+  time_end:   1.  # The end time of the simulation (in internal units).
   dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
+  dt_max:     1e-5  # The maximal time-step size of the simulation (in internal units).
   
 # Parameters governing the snapshots
 Snapshots:
-  basename:            eagle # Common part of the name of output files
-  scale_factor_first:  0.91  # Scale-factor of the first snaphot (cosmological run)
-  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
-  delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
+  basename:            dwarf_galaxy # Common part of the name of output files
+  time_first:          0.  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time:          0.02  # Time difference between consecutive outputs (in internal units)
   compression:         1
 
 # Parameters governing the conserved quantities statistics
 Statistics:
-  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  scale_factor_first:  0.987345 # Scale-factor of the first stat dump (cosmological run)
   time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
   delta_time:          1.05 # Time between statistics output
 
@@ -53,8 +51,8 @@ Statistics:
 Gravity:
   eta:                    0.025    # Constant dimensionless multiplier for time integration.
   theta:                  0.7      # Opening angle (Multipole acceptance criterion)
-  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
-  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  comoving_softening:     0.05 # Comoving softening length (in internal units).
+  max_physical_softening: 0.01    # Physical softening length (in internal units).
   mesh_side_length:       16
 
 # Parameters for the hydrodynamics scheme
@@ -65,8 +63,8 @@ SPH:
 
 # Parameters related to the initial conditions
 InitialConditions:
-  file_name:  ./EAGLE_ICs_6.hdf5     # The file to read
+  file_name:  ./dwarf_galaxy.hdf5     # The file to read
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
-
diff --git a/examples/GEAR/DwarfGalaxy/getIC.sh b/examples/GEAR/DwarfGalaxy/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..92f4cd3939845d57a61683e95135163b8639371f
--- /dev/null
+++ b/examples/GEAR/DwarfGalaxy/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget https://obswww.unige.ch/~lhausamm/swift/IC/DwarfGalaxy/dwarf_galaxy.hdf5
diff --git a/examples/GEAR/DwarfGalaxy/run.sh b/examples/GEAR/DwarfGalaxy/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1f63556e7f7af57885606c08fc8e1a923c2e440d
--- /dev/null
+++ b/examples/GEAR/DwarfGalaxy/run.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+ # Generate the initial conditions if they are not present.
+if [ ! -e dwarf_galaxy.hdf5 ]
+then
+    echo "Fetching initial conditions for the dwarf galaxy example..."
+    ./getIC.sh
+fi
+
+../../swift --feedback --self-gravity --hydro --stars --threads=8 $@ dwarf_galaxy.yml 2>&1 | tee output.log
+
diff --git a/examples/GEAR/ZoomIn/README b/examples/GEAR/ZoomIn/README
new file mode 100644
index 0000000000000000000000000000000000000000..cffc275f2ae1046156d392f8725a7b542c80471a
--- /dev/null
+++ b/examples/GEAR/ZoomIn/README
@@ -0,0 +1,16 @@
+Initial conditions for a zoom in cosmological simulation of dwarf
+galaxies. These have been generated by MUSIC and ran up to z=0 with
+GEAR (see Revaz and Jablonka 2018 for more details on the simulation).
+
+The cosmology is taken from Planck 2015.
+
+The initial conditions have been cleaned to contain only the required
+fields. The ICs have been created for Gadget and the positions and box
+size are hence expressed in h-full units (e.g. box size of 32 / h Mpc).
+Similarly, the peculiar velocities contain an extra sqrt(a) factor.
+
+We will use SWIFT to cancel the h- and a-factors from the ICs. Gas
+particles will be generated at startup.
+
+MD5 check-sum of the ICs:
+9aafe154438478ed435e88664c1c5dba zoom_in.hdf5
diff --git a/examples/GEAR/ZoomIn/getIC.sh b/examples/GEAR/ZoomIn/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6cdfaec981af515249578faa72798c53448e7ecb
--- /dev/null
+++ b/examples/GEAR/ZoomIn/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget https://obswww.unige.ch/~lhausamm/swift/IC/ZoomIn/zoom_in.hdf5
diff --git a/examples/GEAR/ZoomIn/run.sh b/examples/GEAR/ZoomIn/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..59c8ff0d63b504978e4d74abbce8680f65695ffa
--- /dev/null
+++ b/examples/GEAR/ZoomIn/run.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+ # Generate the initial conditions if they are not present.
+if [ ! -e zoom_in.hdf5 ]
+then
+    echo "Fetching initial conditions for the zoom in example..."
+    ./getIC.sh
+fi
+
+../../swift --feedback --cosmology --self-gravity --hydro --stars --threads=8 zoom_in.yml 2>&1 | tee output.log
+
diff --git a/examples/EAGLE_12/eagle_12.yml b/examples/GEAR/ZoomIn/zoom_in.yml
similarity index 65%
rename from examples/EAGLE_12/eagle_12.yml
rename to examples/GEAR/ZoomIn/zoom_in.yml
index 8ebe29fb0216e16aeaafcdc086085d8c9879fc5f..8e5763c4af700b7fd95beb6188ed886198b559b3 100644
--- a/examples/EAGLE_12/eagle_12.yml
+++ b/examples/GEAR/ZoomIn/zoom_in.yml
@@ -1,20 +1,23 @@
 # Define the system of units to use internally. 
 InternalUnitSystem:
   UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitLength_in_cgs:   3.08567758e21 # kpc in centimeters
   UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
   UnitCurrent_in_cgs:  1             # Amperes
   UnitTemp_in_cgs:     1             # Kelvin
 
 # Cosmological parameters
 Cosmology:
-  h:              0.6777        # Reduced Hubble constant
-  a_begin:        0.9090909     # Initial scale-factor of the simulation
+  h:              0.673        # Reduced Hubble constant
+  a_begin:        0.9873046739     # Initial scale-factor of the simulation
   a_end:          1.0           # Final scale factor of the simulation
-  Omega_m:        0.307         # Matter density parameter
-  Omega_lambda:   0.693         # Dark-energy density parameter
-  Omega_b:        0.0455        # Baryon density parameter
-
+  Omega_m:        0.315         # Matter density parameter
+  Omega_lambda:   0.685         # Dark-energy density parameter
+  Omega_b:        0.0486        # Baryon density parameter
+  
+Scheduler:
+  max_top_level_cells:    8
+  
 # Parameters governing the time integration
 TimeIntegration:
   time_begin: 0.    # The starting time of the simulation (in internal units).
@@ -22,31 +25,28 @@ TimeIntegration:
   dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
   dt_max:     1e-3  # The maximal time-step size of the simulation (in internal units).
   
-Scheduler:
-  max_top_level_cells:    8
-
 # Parameters governing the snapshots
 Snapshots:
-  basename:            eagle # Common part of the name of output files
-  scale_factor_first:  0.91  # Scale-factor of the first snaphot (cosmological run)
+  basename:            zoom_in # Common part of the name of output files
+  scale_factor_first:  0.987345  # Scale-factor of the first snapshot (cosmological run)
   time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
   delta_time:          1.01  # Time difference between consecutive outputs (in internal units)
   compression:         1
 
 # Parameters governing the conserved quantities statistics
 Statistics:
-  scale_factor_first:  0.92 # Scale-factor of the first stat dump (cosmological run)
+  scale_factor_first:  0.987345 # Scale-factor of the first stat dump (cosmological run)
   time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
   delta_time:          1.05 # Time between statistics output
 
 # Parameters for the self-gravity scheme
 Gravity:
-  eta:                    0.025     # Constant dimensionless multiplier for time integration.
-  theta:                  0.7       # Opening angle (Multipole acceptance criterion)
-  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
-  max_physical_softening: 0.0007    # Physical softening length (in internal units).
-  mesh_side_length:       32
-  
+  eta:                    0.025    # Constant dimensionless multiplier for time integration.
+  theta:                  0.7      # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.05 # Comoving softening length (in internal units).
+  max_physical_softening: 0.01    # Physical softening length (in internal units).
+  mesh_side_length:       16
+
 # Parameters for the hydrodynamics scheme
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
@@ -55,7 +55,8 @@ SPH:
 
 # Parameters related to the initial conditions
 InitialConditions:
-  file_name:  ./EAGLE_ICs_12.hdf5    # The file to read
+  file_name:  ./zoom_in.hdf5     # The file to read
+  periodic:   1
   cleanup_h_factors: 1               # Remove the h-factors inherited from Gadget
   cleanup_velocity_factors: 1        # Remove the sqrt(a) factor in the velocities inherited from Gadget
 
diff --git a/examples/GiantImpacts/EoSTables/get_eos_tables.sh b/examples/GiantImpacts/EoSTables/get_eos_tables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..979431777fceee421468138aa0180295f45adef2
--- /dev/null
+++ b/examples/GiantImpacts/EoSTables/get_eos_tables.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/HM80_HHe.txt
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/HM80_ice.txt
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/HM80_rock.txt
+
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/SESAME_basalt_7530.txt
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/SESAME_iron_2140.txt
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/SESAME_water_7154.txt
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/EoS/SS08_water.txt
diff --git a/examples/GiantImpacts/GiantImpact/README.md b/examples/GiantImpacts/GiantImpact/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c7d8687886ee9b737ecb492d20d73e780233e9df
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/README.md
@@ -0,0 +1,2 @@
+An example planetary simulation of a giant impact onto the young Uranus with 
+~10^6 SPH particles, as described in Kegerreis et al. (2018), ApJ, 861, 52.
diff --git a/examples/GiantImpacts/GiantImpact/configuration.yml b/examples/GiantImpacts/GiantImpact/configuration.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ccce852862bec6d1eeba2c132457678564979b8a
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/configuration.yml
@@ -0,0 +1,2 @@
+with-hydro:             planetary 
+with-equation-of-state: planetary
diff --git a/examples/GiantImpacts/GiantImpact/get_init_cond.sh b/examples/GiantImpacts/GiantImpact/get_init_cond.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e81035ead7e50ef204bb6bb476e94c7fe0eae0f6
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/get_init_cond.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/GiantImpacts/uranus_1e6.hdf5
diff --git a/examples/GiantImpacts/GiantImpact/output_list.txt b/examples/GiantImpacts/GiantImpact/output_list.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d233fb11ceea02e40c42b8d512e9c1afbe6e835
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/output_list.txt
@@ -0,0 +1,7 @@
+# Time
+4000
+9000
+14000
+20000
+30000
+40000
\ No newline at end of file
diff --git a/examples/GiantImpacts/GiantImpact/plot_solution.py b/examples/GiantImpacts/GiantImpact/plot_solution.py
new file mode 100644
index 0000000000000000000000000000000000000000..faeb071487adc04f0ba37d20967f693ed84652ec
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/plot_solution.py
@@ -0,0 +1,144 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2019 Jacob Kegerreis (jacob.kegerreis@durham.ac.uk)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+# Plot the snapshots from the example giant impact on Uranus, showing the 
+# particles in a thin slice near z=0, coloured by their material, similarish
+# (but not identical) to Fig. 2 in Kegerreis et al. (2018).
+
+import matplotlib
+matplotlib.use("Agg")
+import matplotlib.pyplot as plt
+import numpy as np
+# import swiftsimio as sw
+import h5py
+
+font_size   = 20
+params      = { 
+    'axes.labelsize'    : font_size,
+    'font.size'         : font_size,
+    'xtick.labelsize'   : font_size,
+    'ytick.labelsize'   : font_size,
+    'text.usetex'       : True,
+    'font.family'       : 'serif',
+    }
+matplotlib.rcParams.update(params)
+
+# Snapshot output times
+output_list = [4000, 9000, 14000, 20000, 30000, 40000]
+
+# Material IDs ( = type_id * type_factor + unit_id )
+type_factor = 100
+type_HM80   = 2
+id_body     = 10000
+# Name and ID
+Di_mat_id   = {
+    'HM80_HHe'      : type_HM80 * type_factor,      # Hydrogen-helium atmosphere
+    'HM80_ice'      : type_HM80 * type_factor + 1,  # H2O-CH4-NH3 ice mix
+    'HM80_ice_2'    : type_HM80 * type_factor + 1 + id_body,
+    'HM80_rock'     : type_HM80 * type_factor + 2,  # SiO2-MgO-FeS-FeO rock mix
+    'HM80_rock_2'   : type_HM80 * type_factor + 2 + id_body,
+    }
+# ID and colour
+Di_id_colour    = {
+    Di_mat_id['HM80_HHe']       : '#33DDFF',
+    Di_mat_id['HM80_ice']       : 'lightsteelblue',
+    Di_mat_id['HM80_ice_2']     : '#A080D0',
+    Di_mat_id['HM80_rock']      : 'slategrey',
+    Di_mat_id['HM80_rock_2']    : '#706050',
+    }
+   
+def get_snapshot_slice(snapshot):
+    """ Load and select the particles to plot. """
+    # Load particle data
+    # data    = load("uranus_1e6_%06d.hdf5" % snapshot)
+    # id      = data.gas.particle_ids
+    # pos     = data.gas.coordinates
+    # mat_id  = data.gas.material
+    with h5py.File("uranus_1e6_%06d.hdf5" % snapshot, 'r') as f:
+        id      = f['PartType0/ParticleIDs'].value
+        pos     = (f['PartType0/Coordinates'].value
+                   - 0.5 * f['Header'].attrs['BoxSize'])
+        mat_id  = f['PartType0/MaterialID'].value
+                   
+    # Edit the material ID of particles in the impactor
+    num_in_target   = 869104
+    sel_id          = np.where(num_in_target < id)[0]
+    mat_id[sel_id]  += id_body
+    
+    # Select particles in a thin slice around z=0
+    z_min   = -0.1
+    z_max   = 0.1
+    sel_z   = np.where((z_min < pos[:, 2]) & (pos[:, 2] < z_max))[0]
+    pos     = pos[sel_z]
+    mat_id  = mat_id[sel_z]
+    
+    return pos, mat_id
+
+def plot_snapshot_slice(pos, mat_id):
+    """ Plot the particles, coloured by their material. """
+    colour  = np.empty(len(pos), dtype=object)
+    for id, c in Di_id_colour.items():
+        sel_c           = np.where(mat_id == id)[0]
+        colour[sel_c]   = c
+
+    ax.scatter(pos[:, 0], pos[:, 1], c=colour, edgecolors='none', marker='.', 
+               s=10, alpha=0.5, zorder=0)
+
+# Set up the figure
+fig     = plt.figure(figsize=(12, 8))
+gs      = matplotlib.gridspec.GridSpec(2, 3)
+axes    = [plt.subplot(gs[i_y, i_x]) for i_y in range(2) for i_x in range(3)]
+
+# Plot each snapshot
+for i_ax, ax in enumerate(axes):
+    plt.sca(ax)
+    ax.set_rasterization_zorder(1)
+    
+    # Load and select the particles to plot
+    pos, mat_id = get_snapshot_slice(output_list[i_ax])
+    
+    # Plot the particles, coloured by their material
+    plot_snapshot_slice(pos, mat_id)
+    
+    # Axes etc.
+    ax.set_aspect('equal')
+    ax.set_facecolor('k')
+    
+    ax.set_xlim(-13, 13)
+    ax.set_ylim(-13, 13)
+
+    if i_ax in [0, 3]:
+        ax.set_ylabel(r"y Position $(R_\oplus)$")
+    else: 
+        ax.set_yticklabels([])
+    if 2 < i_ax:
+        ax.set_xlabel(r"x Position $(R_\oplus)$")
+    else: 
+        ax.set_xticklabels([])
+        
+    # Corner time labels 
+    x   = ax.get_xlim()[0] + 0.04 * (ax.get_xlim()[1] - ax.get_xlim()[0])
+    y   = ax.get_ylim()[0] + 0.89 * (ax.get_ylim()[1] - ax.get_ylim()[0])
+    ax.text(x, y, "%.1f h" % (output_list[i_ax] / 60**2), color='w')
+
+plt.subplots_adjust(wspace=0, hspace=0)
+plt.tight_layout()
+
+# Save
+plt.savefig("uranus_1e6.pdf", dpi=200)
\ No newline at end of file
diff --git a/examples/GiantImpacts/GiantImpact/run.sh b/examples/GiantImpacts/GiantImpact/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5a4f0a74dd098b1fff4659a7d72be3845ad47fc6
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/run.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Get the initial conditions if they are not present.
+if [ ! -e uranus_1e6.hdf5 ]
+then
+    echo "Fetching initial conditions file for the Uranus impact example..."
+    ./get_init_cond.sh
+fi
+
+# Get the EoS tables if they are not present.
+cd ../EoSTables
+if [ ! -e HM80_HHe.txt ] || [ ! -e HM80_ice.txt ] || [ ! -e HM80_rock.txt ] 
+then
+    echo "Fetching equations of state tables for the Uranus impact example..."
+    ./get_eos_tables.sh
+fi
+cd ../GiantImpact
+
+# Run SWIFT
+../../swift --hydro --self-gravity --threads=8 uranus_1e6.yml 2>&1 | tee output.log
+
+# Plot the solution
+python3 plot_solution.py
diff --git a/examples/GiantImpacts/GiantImpact/system.yml b/examples/GiantImpacts/GiantImpact/system.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c99fc7158854ec538e68c44ff74bbb0ba1adfb48
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/system.yml
@@ -0,0 +1,9 @@
+input:
+  - uranus_1e6.yml
+  - output_list.txt
+
+output:
+  - uranus_1e6.pdf
+
+swift_parameters:   -s -G
+swift_threads:      8
diff --git a/examples/GiantImpacts/GiantImpact/uranus_1e6.yml b/examples/GiantImpacts/GiantImpact/uranus_1e6.yml
new file mode 100644
index 0000000000000000000000000000000000000000..355748d847097623f171078c2ca8372e06a06efa
--- /dev/null
+++ b/examples/GiantImpacts/GiantImpact/uranus_1e6.yml
@@ -0,0 +1,63 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+    UnitMass_in_cgs:        5.9724e27   # Grams
+    UnitLength_in_cgs:      6.371e8     # Centimeters
+    UnitVelocity_in_cgs:    6.371e8     # Centimeters per second
+    UnitCurrent_in_cgs:     1           # Amperes
+    UnitTemp_in_cgs:        1           # Kelvin
+
+# Parameters related to the initial conditions
+InitialConditions:      
+    file_name:  uranus_1e6.hdf5         # The initial conditions file to read
+    periodic:   0                       # Are we running with periodic ICs?
+
+# Parameters governing the time integration
+TimeIntegration:
+    time_begin:     0                   # The starting time of the simulation (in internal units).
+    time_end:       40000               # The end time of the simulation (in internal units).
+    dt_min:         0.0001              # The minimal time-step size of the simulation (in internal units).
+    dt_max:         100                 # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+    basename:           uranus_1e6      # Common part of the name of output files
+    time_first:         0               # Time of the first output (in internal units)
+    delta_time:         1000            # Time difference between consecutive outputs (in internal units)
+    int_time_label_on:  1               # Enable to label the snapshots using the time rounded to an integer (in internal units)
+    output_list_on:     1               # Enable the output list
+    output_list:        output_list.txt # File containing the output times (see documentation in "Parameter File" section)
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+    time_first: 0                       # Time of the first output (in internal units)
+    delta_time: 1000                    # Time between statistics output
+
+# Parameters controlling restarts
+Restarts:
+    enable: 0                           # Whether to enable dumping restarts at fixed intervals.
+
+# Parameters for the hydrodynamics scheme
+SPH:
+    resolution_eta:     1.2348          # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+    delta_neighbours:   0.1             # The tolerance for the targeted number of neighbours.
+    CFL_condition:      0.2             # Courant-Friedrich-Levy condition for time integration.
+    h_max:              0.5             # Maximal allowed smoothing length (in internal units).
+    viscosity_alpha:    1.5             # Override for the initial value of the artificial viscosity.
+
+# Parameters for the self-gravity scheme
+Gravity:
+    eta:                    0.025       # Constant dimensionless multiplier for time integration.
+    theta:                  0.7         # Opening angle (Multipole acceptance criterion)
+    comoving_softening:     0.003       # Comoving softening length (in internal units).
+    max_physical_softening: 0.003       # Physical softening length (in internal units).
+
+# Parameters for the task scheduling
+Scheduler:
+    max_top_level_cells:    64          # Maximal number of top-level cells in any dimension. The nu
+
+# Parameters related to the equation of state
+EoS:
+    planetary_use_HM80:             1   # Whether to initialise the Hubbard & MacFarlane (1980) EOS
+    planetary_HM80_HHe_table_file:  ../EoSTables/HM80_HHe.txt
+    planetary_HM80_ice_table_file:  ../EoSTables/HM80_ice.txt
+    planetary_HM80_rock_table_file: ../EoSTables/HM80_rock.txt
diff --git a/examples/DiscPatch/GravityOnly/README b/examples/GravityTests/DiscPatch/GravityOnly/README
similarity index 100%
rename from examples/DiscPatch/GravityOnly/README
rename to examples/GravityTests/DiscPatch/GravityOnly/README
diff --git a/examples/DiscPatch/GravityOnly/disc-patch.yml b/examples/GravityTests/DiscPatch/GravityOnly/disc-patch.yml
similarity index 99%
rename from examples/DiscPatch/GravityOnly/disc-patch.yml
rename to examples/GravityTests/DiscPatch/GravityOnly/disc-patch.yml
index 4ec061add978bec82c267660cc343cf0bfa8f4c6..bcc7d1a3decfb36201b60349eedb5d214e61f9a6 100644
--- a/examples/DiscPatch/GravityOnly/disc-patch.yml
+++ b/examples/GravityTests/DiscPatch/GravityOnly/disc-patch.yml
@@ -34,7 +34,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Disc-Patch.hdf5       # The file to read
-
+  periodic:   1
+  
 # External potential parameters
 DiscPatchPotential:
   surface_density: 10.
diff --git a/examples/DiscPatch/GravityOnly/makeIC.py b/examples/GravityTests/DiscPatch/GravityOnly/makeIC.py
similarity index 98%
rename from examples/DiscPatch/GravityOnly/makeIC.py
rename to examples/GravityTests/DiscPatch/GravityOnly/makeIC.py
index 5f9650f44277cf858021c9b628d68134c47a19b7..3abf4f87fc6b6f78ed1814be08ca0d8e39359a26 100644
--- a/examples/DiscPatch/GravityOnly/makeIC.py
+++ b/examples/GravityTests/DiscPatch/GravityOnly/makeIC.py
@@ -111,10 +111,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 numpy.random.seed(1234)
 
diff --git a/examples/DiscPatch/GravityOnly/run.sh b/examples/GravityTests/DiscPatch/GravityOnly/run.sh
similarity index 77%
rename from examples/DiscPatch/GravityOnly/run.sh
rename to examples/GravityTests/DiscPatch/GravityOnly/run.sh
index 9af1011ee653253f0d1b2cd26db0ac13cf11adc0..ab05b603254f62cdaebef49882407f441d006a2c 100755
--- a/examples/DiscPatch/GravityOnly/run.sh
+++ b/examples/GravityTests/DiscPatch/GravityOnly/run.sh
@@ -7,4 +7,4 @@ then
     python makeIC.py 1000
 fi
 
-../../swift -g -t 2 disc-patch.yml
+../../../swift --external-gravity --threads=2 disc-patch.yml
diff --git a/examples/DiscPatch/GravityOnly/test.pro b/examples/GravityTests/DiscPatch/GravityOnly/test.pro
similarity index 100%
rename from examples/DiscPatch/GravityOnly/test.pro
rename to examples/GravityTests/DiscPatch/GravityOnly/test.pro
diff --git a/examples/DiscPatch/HydroStatic/README b/examples/GravityTests/DiscPatch/HydroStatic/README
similarity index 100%
rename from examples/DiscPatch/HydroStatic/README
rename to examples/GravityTests/DiscPatch/HydroStatic/README
diff --git a/examples/DiscPatch/HydroStatic/disc-patch-icc.yml b/examples/GravityTests/DiscPatch/HydroStatic/disc-patch-icc.yml
similarity index 99%
rename from examples/DiscPatch/HydroStatic/disc-patch-icc.yml
rename to examples/GravityTests/DiscPatch/HydroStatic/disc-patch-icc.yml
index 983a7dcc103135ab4db61d6ea77701532226c101..aee54057cf2c5b9d178abac5599d9e4133652362 100644
--- a/examples/DiscPatch/HydroStatic/disc-patch-icc.yml
+++ b/examples/GravityTests/DiscPatch/HydroStatic/disc-patch-icc.yml
@@ -37,7 +37,8 @@ EoS:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Disc-Patch.hdf5       # The file to read
-
+  periodic:   1
+  
 # External potential parameters
 DiscPatchPotential:
   surface_density: 10.
diff --git a/examples/DiscPatch/HydroStatic/disc-patch.yml b/examples/GravityTests/DiscPatch/HydroStatic/disc-patch.yml
similarity index 99%
rename from examples/DiscPatch/HydroStatic/disc-patch.yml
rename to examples/GravityTests/DiscPatch/HydroStatic/disc-patch.yml
index 422e1cf910202e8f6dc0a9395fc7e36ce80443ed..8651ac09dbc4c4a97f0915ce7df6c678837b2f45 100644
--- a/examples/DiscPatch/HydroStatic/disc-patch.yml
+++ b/examples/GravityTests/DiscPatch/HydroStatic/disc-patch.yml
@@ -34,7 +34,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Disc-Patch-dynamic.hdf5       # The file to read
-
+  periodic:   1
+  
 # External potential parameters
 DiscPatchPotential:
   surface_density: 10.
diff --git a/examples/DiscPatch/HydroStatic/getGlass.sh b/examples/GravityTests/DiscPatch/HydroStatic/getGlass.sh
similarity index 100%
rename from examples/DiscPatch/HydroStatic/getGlass.sh
rename to examples/GravityTests/DiscPatch/HydroStatic/getGlass.sh
diff --git a/examples/DiscPatch/HydroStatic/makeIC.py b/examples/GravityTests/DiscPatch/HydroStatic/makeIC.py
similarity index 98%
rename from examples/DiscPatch/HydroStatic/makeIC.py
rename to examples/GravityTests/DiscPatch/HydroStatic/makeIC.py
index 8b4c55560c34e7bdb538f2b4732369216f91a087..dd50a821a2eb376c0785afd849a3ea575e349703 100644
--- a/examples/DiscPatch/HydroStatic/makeIC.py
+++ b/examples/GravityTests/DiscPatch/HydroStatic/makeIC.py
@@ -182,10 +182,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 # write gas particles
 grp0   = file.create_group("/PartType0")
 
diff --git a/examples/DiscPatch/HydroStatic/plotSolution.py b/examples/GravityTests/DiscPatch/HydroStatic/plotSolution.py
similarity index 100%
rename from examples/DiscPatch/HydroStatic/plotSolution.py
rename to examples/GravityTests/DiscPatch/HydroStatic/plotSolution.py
diff --git a/examples/DiscPatch/HydroStatic/run.sh b/examples/GravityTests/DiscPatch/HydroStatic/run.sh
similarity index 79%
rename from examples/DiscPatch/HydroStatic/run.sh
rename to examples/GravityTests/DiscPatch/HydroStatic/run.sh
index e1f47ecad54e7e171d78b7da080d56579e985d1e..44b933f79c1b0f699010932f285c62396de37a72 100755
--- a/examples/DiscPatch/HydroStatic/run.sh
+++ b/examples/GravityTests/DiscPatch/HydroStatic/run.sh
@@ -13,6 +13,6 @@ then
 fi
 
 # Run SWIFT
-../../swift -g -s -t 4 disc-patch-icc.yml 2>&1 | tee output.log
+../../../swift --external-gravity --hydro --threads=4 disc-patch-icc.yml 2>&1 | tee output.log
 
 python plotSolution.py
diff --git a/examples/DiscPatch/HydroStatic_1D/disc-patch-icc.yml b/examples/GravityTests/DiscPatch/HydroStatic_1D/disc-patch-icc.yml
similarity index 99%
rename from examples/DiscPatch/HydroStatic_1D/disc-patch-icc.yml
rename to examples/GravityTests/DiscPatch/HydroStatic_1D/disc-patch-icc.yml
index 450689034f4ae782cc74bf01dac93e723e5d2ce2..ea5d2e24eb93c64e21f37a8c137603b22885392c 100644
--- a/examples/DiscPatch/HydroStatic_1D/disc-patch-icc.yml
+++ b/examples/GravityTests/DiscPatch/HydroStatic_1D/disc-patch-icc.yml
@@ -34,7 +34,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Disc-Patch.hdf5       # The file to read
-
+  periodic:   1
+  
 # External potential parameters
 DiscPatchPotential:
   surface_density: 10.
diff --git a/examples/DiscPatch/HydroStatic_1D/makeIC.py b/examples/GravityTests/DiscPatch/HydroStatic_1D/makeIC.py
similarity index 98%
rename from examples/DiscPatch/HydroStatic_1D/makeIC.py
rename to examples/GravityTests/DiscPatch/HydroStatic_1D/makeIC.py
index 983a550a3442c6470611792081a5884d38023a6a..b193c85e50d3526b8518cac06b9b00c3071c383a 100644
--- a/examples/DiscPatch/HydroStatic_1D/makeIC.py
+++ b/examples/GravityTests/DiscPatch/HydroStatic_1D/makeIC.py
@@ -168,10 +168,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 # write gas particles
 grp0   = file.create_group("/PartType0")
 
diff --git a/examples/DiscPatch/HydroStatic_1D/plotSolution.py b/examples/GravityTests/DiscPatch/HydroStatic_1D/plotSolution.py
similarity index 100%
rename from examples/DiscPatch/HydroStatic_1D/plotSolution.py
rename to examples/GravityTests/DiscPatch/HydroStatic_1D/plotSolution.py
diff --git a/examples/DiscPatch/HydroStatic_1D/run.sh b/examples/GravityTests/DiscPatch/HydroStatic_1D/run.sh
similarity index 71%
rename from examples/DiscPatch/HydroStatic_1D/run.sh
rename to examples/GravityTests/DiscPatch/HydroStatic_1D/run.sh
index e9d073a6cc7a06ec9ebd9fdb556c44778d32c7f4..a76db9422e12ee18251083ee8cf26dd28e861e69 100755
--- a/examples/DiscPatch/HydroStatic_1D/run.sh
+++ b/examples/GravityTests/DiscPatch/HydroStatic_1D/run.sh
@@ -8,6 +8,6 @@ then
 fi
 
 # Run SWIFT
-../../swift -g -s -t 4 disc-patch-icc.yml 2>&1 | tee output.log
+../../../swift --external-gravity --hydro --threads=4 disc-patch-icc.yml 2>&1 | tee output.log
 
 python plotSolution.py
diff --git a/examples/ExternalPointMass/energy_plot.py b/examples/GravityTests/ExternalPointMass/energy_plot.py
similarity index 95%
rename from examples/ExternalPointMass/energy_plot.py
rename to examples/GravityTests/ExternalPointMass/energy_plot.py
index 1863305614c226f64faac3d86fa2f809d49b9d74..5644e48f8bd954800526369cc152da7024d069dd 100644
--- a/examples/ExternalPointMass/energy_plot.py
+++ b/examples/GravityTests/ExternalPointMass/energy_plot.py
@@ -91,8 +91,8 @@ for i in range(402):
     E_tot_snap[i] = E_kin_snap[i] + E_pot_snap[i]
     Lz_snap[i] = np.sum(Lz)
 
-print "Starting energy:", E_kin_stats[0], E_pot_stats[0], E_tot_stats[0]
-print "Ending   energy:", E_kin_stats[-1], E_pot_stats[-1], E_tot_stats[-1]
+print("Starting energy:", E_kin_stats[0], E_pot_stats[0], E_tot_stats[0])
+print("Ending   energy:", E_kin_stats[-1], E_pot_stats[-1], E_tot_stats[-1])
     
 # Plot energy evolution
 figure()
diff --git a/examples/ExternalPointMass/externalPointMass.yml b/examples/GravityTests/ExternalPointMass/externalPointMass.yml
similarity index 96%
rename from examples/ExternalPointMass/externalPointMass.yml
rename to examples/GravityTests/ExternalPointMass/externalPointMass.yml
index de05a9ff3c10afa7871ebeafbf4d8d272056d39f..c9b1ef34d618eddfc2ba410785deb4919ed1b835 100644
--- a/examples/ExternalPointMass/externalPointMass.yml
+++ b/examples/GravityTests/ExternalPointMass/externalPointMass.yml
@@ -31,11 +31,13 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  PointMass.hdf5        # The file to read
+  periodic:   0
   shift:      [50.,50.,50.]         # A shift to apply to all particles read from the ICs (in internal units).
 
 # External potential parameters
 PointMassPotential:
   position:        [50.,50.,50.]    # location of external point mass in internal units
+  useabspos:       1        # Position is absolute
   mass:            1e10     # mass of external point mass in internal units
   timestep_mult:   0.03     # controls time step
 
diff --git a/examples/ExternalPointMass/makeIC.py b/examples/GravityTests/ExternalPointMass/makeIC.py
similarity index 84%
rename from examples/ExternalPointMass/makeIC.py
rename to examples/GravityTests/ExternalPointMass/makeIC.py
index fdc5b1fd67ffcbd85beae3a9d6d1274d3d48c279..6780430d22e39350e7efeb52190708c78141bd4f 100644
--- a/examples/ExternalPointMass/makeIC.py
+++ b/examples/GravityTests/ExternalPointMass/makeIC.py
@@ -36,16 +36,16 @@ const_unit_length_in_cgs   =   (1000*PARSEC_IN_CGS)
 const_unit_mass_in_cgs     =   (SOLAR_MASS_IN_CGS)
 const_unit_velocity_in_cgs =   (1e5)
 
-print "UnitMass_in_cgs:     ", const_unit_mass_in_cgs 
-print "UnitLength_in_cgs:   ", const_unit_length_in_cgs
-print "UnitVelocity_in_cgs: ", const_unit_velocity_in_cgs
-print "UnitTime_in_cgs:     ", const_unit_length_in_cgs / const_unit_velocity_in_cgs
+print("UnitMass_in_cgs:     ", const_unit_mass_in_cgs) 
+print("UnitLength_in_cgs:   ", const_unit_length_in_cgs)
+print("UnitVelocity_in_cgs: ", const_unit_velocity_in_cgs)
+print("UnitTime_in_cgs:     ", const_unit_length_in_cgs / const_unit_velocity_in_cgs)
 
 # derived units
 const_unit_time_in_cgs = (const_unit_length_in_cgs / const_unit_velocity_in_cgs)
 const_G                = ((NEWTON_GRAVITY_CGS*const_unit_mass_in_cgs*const_unit_time_in_cgs*const_unit_time_in_cgs/(const_unit_length_in_cgs*const_unit_length_in_cgs*const_unit_length_in_cgs)))
-print '---------------------'
-print 'G in internal units: ', const_G
+print('---------------------')
+print('G in internal units: ', const_G)
 
 
 # Parameters
@@ -53,7 +53,7 @@ periodic   = 1            # 1 For periodic box
 boxSize    = 100.         # 
 max_radius = boxSize / 4. # maximum radius of particles
 Mass       = 1e10         
-print "Mass at the centre:  ", Mass
+print("Mass at the centre:  ", Mass)
 
 numPart = int(sys.argv[1])  # Number of particles
 mass    = 1.
@@ -79,9 +79,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
 
 #Units
 grp = file.create_group("/Units")
@@ -96,9 +93,9 @@ grp1 = file.create_group("/PartType1")
 
 #generate particle positions
 radius = max_radius * (numpy.random.rand(numPart))**(1./3.)
-print '---------------------'
-print 'Radius: minimum = ',min(radius)
-print 'Radius: maximum = ',max(radius)
+print('---------------------')
+print('Radius: minimum = ',min(radius))
+print('Radius: maximum = ',max(radius))
 radius = numpy.sort(radius)
 r      = numpy.zeros((numPart, 3))
 r[:,0] = radius
@@ -107,9 +104,9 @@ r[:,0] = radius
 speed  = numpy.sqrt(const_G * Mass / radius)
 omega  = speed / radius
 period = 2.*math.pi/omega
-print '---------------------'
-print 'Period: minimum = ',min(period)
-print 'Period: maximum = ',max(period)
+print('---------------------')
+print('Period: minimum = ',min(period))
+print('Period: maximum = ',max(period))
 
 v      = numpy.zeros((numPart, 3))
 v[:,0] = -omega * r[:,1]
diff --git a/examples/ExternalPointMass/run.sh b/examples/GravityTests/ExternalPointMass/run.sh
similarity index 75%
rename from examples/ExternalPointMass/run.sh
rename to examples/GravityTests/ExternalPointMass/run.sh
index e074c384c4e002a161c7d8258e9068663204099f..6f96200e45ceabcece487005560c293cdc084780 100755
--- a/examples/ExternalPointMass/run.sh
+++ b/examples/GravityTests/ExternalPointMass/run.sh
@@ -8,6 +8,6 @@ then
 fi
 
 rm -rf pointMass_*.hdf5
-../swift -g -t 1 externalPointMass.yml 2>&1 | tee output.log
+../../swift --external-gravity --threads=1 externalPointMass.yml 2>&1 | tee output.log
 
 python energy_plot.py
diff --git a/examples/Gravity_glass/README b/examples/GravityTests/Gravity_glass/README
similarity index 100%
rename from examples/Gravity_glass/README
rename to examples/GravityTests/Gravity_glass/README
diff --git a/examples/Gravity_glass/makeIC.py b/examples/GravityTests/Gravity_glass/makeIC.py
similarity index 96%
rename from examples/Gravity_glass/makeIC.py
rename to examples/GravityTests/Gravity_glass/makeIC.py
index 1a3fde9e2868c8881923fa61d1c308bca0f2f095..f573c79b19a5e3655d4f55f761ef20a6468342de 100644
--- a/examples/Gravity_glass/makeIC.py
+++ b/examples/GravityTests/Gravity_glass/makeIC.py
@@ -53,10 +53,6 @@ grp.attrs["MassTable"] = [0.0, mass, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Gravity_glass/uniform_DM_box.yml b/examples/GravityTests/Gravity_glass/uniform_DM_box.yml
similarity index 98%
rename from examples/Gravity_glass/uniform_DM_box.yml
rename to examples/GravityTests/Gravity_glass/uniform_DM_box.yml
index 8f3ef6f025afb1a92320eeb702b50e8bf4befce6..00a5864cdb6ff0897501248437b3cc00be0f7acf 100644
--- a/examples/Gravity_glass/uniform_DM_box.yml
+++ b/examples/GravityTests/Gravity_glass/uniform_DM_box.yml
@@ -42,3 +42,4 @@ Statistics:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./uniform_DM_box.hdf5
+  periodic:   1
\ No newline at end of file
diff --git a/examples/GravityTests/Hernquist_circularorbit/hernquistcirc.yml b/examples/GravityTests/Hernquist_circularorbit/hernquistcirc.yml
new file mode 100755
index 0000000000000000000000000000000000000000..5e81d180003283ecb74209b19e1ff3db8097b08f
--- /dev/null
+++ b/examples/GravityTests/Hernquist_circularorbit/hernquistcirc.yml
@@ -0,0 +1,38 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.988e+33    # Grams
+  UnitLength_in_cgs:   3.086e+21 # Centimeters
+  UnitVelocity_in_cgs: 1e5       # Centimeters per second
+  UnitCurrent_in_cgs:  1         # Amperes
+  UnitTemp_in_cgs:     1         # Kelvin
+
+# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
+TimeIntegration:
+  time_begin:          0.      # The starting time of the simulation (in internal units).
+  time_end:            2.0     # The end time of the simulation (in internal units).
+  dt_min:              1e-10    # The minimal time-step size of the simulation (in internal units).
+  dt_max:              1e0    # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            output  # Common part of the name of output files
+  time_first:          0.      # Time of the first output (in internal units)
+  delta_time:          1e-3    # Time difference between consecutive outputs (in internal units)
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1e0    # Time between statistics output
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:          circularorbitshernquist.hdf5 # The file to read
+  periodic:           0
+
+# Hernquist potential parameters
+HernquistPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
+  position:        [0.,0.,0.]    # Location of centre of isothermal potential with respect to centre of the box (if 0) otherwise absolute (if 1) (internal units)
+  mass:            2e12     # Mass of the Hernquist potential
+  scalelength:     10.0     # Scale length of the potential
+  timestep_mult:   0.005     # Dimensionless pre-factor for the time-step condition
+  epsilon:         0.1      # Softening size (internal units)
diff --git a/examples/GravityTests/Hernquist_circularorbit/makeIC.py b/examples/GravityTests/Hernquist_circularorbit/makeIC.py
new file mode 100755
index 0000000000000000000000000000000000000000..474450f0e23704bfc43730872a978107f28704e9
--- /dev/null
+++ b/examples/GravityTests/Hernquist_circularorbit/makeIC.py
@@ -0,0 +1,81 @@
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+from galpy.potential import NFWPotential
+from galpy.orbit import Orbit
+from galpy.util import bovy_conversion
+import numpy as np
+import matplotlib.pyplot as plt
+from astropy import units
+import h5py as h5
+
+C = 8.0
+M_200 = 2.0
+N_PARTICLES = 3
+print("Initial conditions written to 'circularorbitshernquist.hdf5'")
+
+pos = np.zeros((3, 3))
+pos[0, 2] = 50.0
+pos[1, 2] = 10.0
+pos[2, 2] = 2.0
+pos = pos + 500.0
+vel = np.zeros((3, 3))
+vel[0, 1] = 348.0
+vel[1, 1] = 466.9
+vel[2, 1] = 348.0
+ids = np.array([1.0, 2.0, 3.0])
+mass = np.array([1.0, 1.0, 1.0])
+
+# File
+file = h5.File("circularorbitshernquist.hdf5", "w")
+
+# Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = 3.086e21
+grp.attrs["Unit mass in cgs (U_M)"] = 1.988e33
+grp.attrs["Unit time in cgs (U_t)"] = 3.086e16
+grp.attrs["Unit current in cgs (U_I)"] = 1.0
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.0
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = 1000.0
+grp.attrs["NumPart_Total"] = [0, N_PARTICLES, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [0, N_PARTICLES, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["Dimension"] = 3
+
+# Runtime parameters
+grp = file.create_group("/RuntimePars")
+grp.attrs["PeriodicBoundariesOn"] = 1
+
+# Particle group
+grp1 = file.create_group("/PartType1")
+ds = grp1.create_dataset("Velocities", (N_PARTICLES, 3), "f", data=vel)
+
+ds = grp1.create_dataset("Masses", (N_PARTICLES,), "f", data=mass)
+
+ds = grp1.create_dataset("ParticleIDs", (N_PARTICLES,), "L", data=ids)
+
+ds = grp1.create_dataset("Coordinates", (N_PARTICLES, 3), "d", data=pos)
+
+file.close()
diff --git a/examples/GravityTests/Hernquist_circularorbit/plotprog.py b/examples/GravityTests/Hernquist_circularorbit/plotprog.py
new file mode 100755
index 0000000000000000000000000000000000000000..a19c66e7f30e0e4012a23a4d38dd23045deea6e2
--- /dev/null
+++ b/examples/GravityTests/Hernquist_circularorbit/plotprog.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from scipy.integrate import odeint
+
+t = np.linspace(0, 40, int(1e5))
+y0 = [0, 10]
+a = 30.0
+G = 4.300927e-06
+M = 1e15
+GM = G * M
+
+
+lengthrun = 2001
+numbpar = 3
+
+radius = np.zeros((numbpar, lengthrun))
+xx = np.zeros((numbpar, lengthrun))
+yy = np.zeros((numbpar, lengthrun))
+zz = np.zeros((numbpar, lengthrun))
+time = np.zeros(lengthrun)
+for i in range(0, lengthrun):
+    Data = h5py.File("output_%04d.hdf5" % i, "r")
+    header = Data["Header"]
+    time[i] = header.attrs["Time"]
+    particles = Data["PartType1"]
+    positions = particles["Coordinates"]
+    xx[:, i] = positions[:, 0] - 500.0
+    yy[:, i] = positions[:, 1] - 500.0
+    zz[:, i] = positions[:, 2] - 500.0
+
+col = ["b", "r", "c", "y", "k"]
+
+for i in range(0, numbpar):
+    plt.plot(xx[i, :], yy[i, :], col[i])
+
+plt.ylabel("y (kpc)")
+plt.xlabel("x (kpc)")
+plt.savefig("xyplot.png")
+plt.close()
+
+
+for i in range(0, numbpar):
+    plt.plot(xx[i, :], zz[i, :], col[i])
+
+plt.ylabel("z (kpc)")
+plt.xlabel("x (kpc)")
+plt.savefig("xzplot.png")
+plt.close()
+
+for i in range(0, numbpar):
+    plt.plot(yy[i, :], zz[i, :], col[i])
+
+plt.ylabel("z (kpc)")
+plt.xlabel("y (kpc)")
+plt.savefig("yzplot.png")
+plt.close()
diff --git a/examples/GravityTests/Hernquist_circularorbit/run.sh b/examples/GravityTests/Hernquist_circularorbit/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..15cd929a5d2ba62efd4f7af378c9cad4788e7c5f
--- /dev/null
+++ b/examples/GravityTests/Hernquist_circularorbit/run.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+if [ ! -e circularorbitshernquist.hdf5 ]
+then 
+    echo "Generate initial conditions for circular orbits"
+    if command -v python3 &>/dev/null; then
+        python3 makeIC.py
+    else 
+        python makeIC.py
+    fi
+
+fi
+
+# Run SWIFT with the external Hernquist potential, using 6 threads
+../../swift --external-gravity --threads=6 hernquistcirc.yml 2>&1 | tee output.log
+
+
+echo "Save plots of the circular orbits"
+if command -v python3 &>/dev/null; then
+    python3 plotprog.py
+else 
+    python plotprog.py
+fi
diff --git a/examples/GravityTests/Hernquist_radialinfall/README b/examples/GravityTests/Hernquist_radialinfall/README
new file mode 100644
index 0000000000000000000000000000000000000000..be22a1a11a5b1e0538723781607aa374644a4e0f
--- /dev/null
+++ b/examples/GravityTests/Hernquist_radialinfall/README
@@ -0,0 +1,3 @@
+This example generates 5 particles at radii of 10, 20, 30, 40 and 50 kpc
+without velocity and follows the evolution of these particles in a Hernquist
+potential as they are free falling.
diff --git a/examples/GravityTests/Hernquist_radialinfall/hernquist.yml b/examples/GravityTests/Hernquist_radialinfall/hernquist.yml
new file mode 100644
index 0000000000000000000000000000000000000000..adea54ed9a33ee889b39bb519c8098917b33ef9f
--- /dev/null
+++ b/examples/GravityTests/Hernquist_radialinfall/hernquist.yml
@@ -0,0 +1,39 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e33    # M_sun
+  UnitLength_in_cgs:   3.08567758e21 # kpc
+  UnitVelocity_in_cgs: 1e5           # km/s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   40.    # The end time of the simulation (in internal units).
+  dt_min:     9e-10  # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1e-3 # Time between statistics output
+  
+# Parameters governing the snapshots
+Snapshots:
+  basename:            hernquist # Common part of the name of output files
+  time_first:          0.         # Time of the first output (in internal units)
+  delta_time:          0.02       # Time difference between consecutive outputs (in internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  Hernquist.hdf5       # The file to read
+  periodic:   1
+  shift:      [200.,200.,200.]      # Shift all particles to be in the potential
+ 
+# External potential parameters
+HernquistPotential:
+  useabspos:       0          # Whether to use absolute position (1) or relative potential to centre of box (0)
+  position:        [0.,0.,0.]
+  mass:            1e9
+  scalelength:     1.0
+  timestep_mult:   0.01      # controls time step
+  epsilon:         2.0         # Softening length at the centre of the halo
diff --git a/examples/GravityTests/Hernquist_radialinfall/makeIC.py b/examples/GravityTests/Hernquist_radialinfall/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..567e15a95302bc8848c1d026b82dc5be54c7a0c6
--- /dev/null
+++ b/examples/GravityTests/Hernquist_radialinfall/makeIC.py
@@ -0,0 +1,167 @@
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+##############################################################################
+
+import h5py
+import sys
+import numpy
+import math
+import random
+import numpy as np
+
+# Generates 5 dark matter particles at fixed radii (10-50 kpc) with zero
+# velocity, to free-fall radially in an external Hernquist potential
+# usage: python makeIC.py   (no command-line arguments)
+# all particles start on the positive x axis
+
+# physical constants in cgs
+NEWTON_GRAVITY_CGS = 6.67408e-8
+SOLAR_MASS_IN_CGS = 1.98848e33
+PARSEC_IN_CGS = 3.08567758e18
+YEAR_IN_CGS = 3.15569252e7
+
+# choice of units
+const_unit_length_in_cgs = 1000 * PARSEC_IN_CGS
+const_unit_mass_in_cgs = SOLAR_MASS_IN_CGS
+const_unit_velocity_in_cgs = 1e5
+
+
+# Properties of the Hernquist potential
+Mass = 1e15
+scaleLength = 30.0  # kpc
+
+
+# derived units
+const_unit_time_in_cgs = const_unit_length_in_cgs / const_unit_velocity_in_cgs
+const_G = (
+    NEWTON_GRAVITY_CGS
+    * const_unit_mass_in_cgs
+    * const_unit_time_in_cgs
+    * const_unit_time_in_cgs
+    / (const_unit_length_in_cgs * const_unit_length_in_cgs * const_unit_length_in_cgs)
+)
+print("G=", const_G)
+
+
+def hernquistcircvel(r, M, a):
+    """ Function that calculates the circular velocity in a 
+    Hernquist potential.
+    @param r: radius from centre of potential
+    @param M: mass of the Hernquist potential
+    @param a: Scale length of the potential
+    @return: circular velocity
+    """
+    return (const_G * M * r) ** 0.5 / (r + a)
+
+
+# Parameters
+periodic = 1  # 1 For periodic box
+boxSize = 400.0  #  [kpc]
+Radius = 100.0  # maximum radius of particles [kpc]
+G = const_G
+
+N = 5
+L = N ** (1.0 / 3.0)
+
+fileName = "Hernquist.hdf5"
+
+
+# ---------------------------------------------------
+numPart = N
+mass = 1
+
+# --------------------------------------------------
+
+# File
+file = h5py.File(fileName, "w")
+
+# Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = const_unit_length_in_cgs
+grp.attrs["Unit mass in cgs (U_M)"] = const_unit_mass_in_cgs
+grp.attrs["Unit time in cgs (U_t)"] = (
+    const_unit_length_in_cgs / const_unit_velocity_in_cgs
+)
+grp.attrs["Unit current in cgs (U_I)"] = 1.0
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.0
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = boxSize
+grp.attrs["NumPart_Total"] = [0, numPart, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [0, numPart, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["Dimension"] = 3
+
+# set seed for random number
+numpy.random.seed(1234)
+
+# Particle group
+grp1 = file.create_group("/PartType1")
+# generate particle positions
+# radius = Radius * (numpy.random.rand(N))**(1./3.) + 10.
+radius = np.zeros(N)
+radius[0] = 10
+radius[1] = 20
+radius[2] = 30
+radius[3] = 40
+radius[4] = 50
+# this part is not even used:
+# ctheta = -1. + 2 * numpy.random.rand(N)
+# stheta = numpy.sqrt(1.-ctheta**2)
+# phi    =  2 * math.pi * numpy.random.rand(N)
+# end
+r = numpy.zeros((numPart, 3))
+r[:, 0] = radius
+
+# import matplotlib.pyplot as plt
+# plt.plot(r[:,0],'.')
+# plt.show()
+
+# print('Mass = ', Mass)
+# print('radius = ', radius)
+# print('scaleLength = ',scaleLength)
+#
+v = numpy.zeros((numPart, 3))
+# v[:,0] = hernquistcircvel(radius,Mass,scaleLength)
+omega = v[:, 0] / radius
+period = 2.0 * math.pi / omega
+print("period = minimum = ", min(period), " maximum = ", max(period))
+print("Circular velocity = minimum =", min(v[:, 0]), " maximum = ", max(v[:, 0]))
+
+omegav = omega
+
+v[:, 0] = -omegav * r[:, 1]
+v[:, 1] = omegav * r[:, 0]
+
+ds = grp1.create_dataset("Velocities", (numPart, 3), "f", data=v)
+
+m = numpy.full((numPart,), mass, dtype="f")
+ds = grp1.create_dataset("Masses", (numPart,), "f", data=m)
+
+ids = 1 + numpy.linspace(0, numPart, numPart, endpoint=False)
+ds = grp1.create_dataset("ParticleIDs", (numPart,), "L", data=ids)
+
+ds = grp1.create_dataset("Coordinates", (numPart, 3), "d", data=r)
+
+
+file.close()
diff --git a/examples/GravityTests/Hernquist_radialinfall/plotprog.py b/examples/GravityTests/Hernquist_radialinfall/plotprog.py
new file mode 100755
index 0000000000000000000000000000000000000000..d8de00a6b694bb33bf96ef7065c972aa6bb3f6cb
--- /dev/null
+++ b/examples/GravityTests/Hernquist_radialinfall/plotprog.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from scipy.integrate import odeint
+
+
+lengthrun = 2001
+numbpar = 5
+
+radius = np.zeros((numbpar, lengthrun))
+time = np.zeros(lengthrun)
+for i in range(0, lengthrun):
+    Data = h5py.File("hernquist_%04d.hdf5" % i, "r")
+    header = Data["Header"]
+    time[i] = header.attrs["Time"]
+    particles = Data["PartType1"]
+    positions = particles["Coordinates"]
+    radius[:, i] = positions[:, 0] - 200.0
+
+col = ["b", "r", "c", "y", "k"]
+
+for i in range(0, numbpar):
+    plt.plot(time, radius[i, :], col[i])
+    plt.axhline(np.max(radius[i, :]), color=col[i], linestyle="--")
+    plt.axhline(-np.max(radius[i, :]), color=col[i], linestyle="--")
+
+
+plt.ylabel("Radial distance (kpc)")
+plt.xlabel("Simulation time (internal units)")
+plt.savefig("radial_infall.png")
+plt.close()
diff --git a/examples/GravityTests/Hernquist_radialinfall/run.sh b/examples/GravityTests/Hernquist_radialinfall/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7381f3ecd4f8aa6711d33607cec52d5cdfedccaf
--- /dev/null
+++ b/examples/GravityTests/Hernquist_radialinfall/run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e Hernquist.hdf5 ]
+then
+    echo "Generate initial conditions for radial orbits"
+    if command -v python3 &>/dev/null; then
+        python3 makeIC.py 
+    else 
+        python makeIC.py
+    fi
+fi
+
+rm -rf hernquist_*.hdf5
+../../swift --external-gravity --threads=1 hernquist.yml 2>&1 | tee output.log
+
+
+
+echo "Make plots of the radially free falling particles" 
+if command -v python3 &>/dev/null; then
+    python3 plotprog.py 
+else 
+    python plotprog.py
+fi
diff --git a/examples/HydrostaticHalo/README b/examples/GravityTests/HydrostaticHalo/README
similarity index 100%
rename from examples/HydrostaticHalo/README
rename to examples/GravityTests/HydrostaticHalo/README
diff --git a/examples/HydrostaticHalo/density_profile.py b/examples/GravityTests/HydrostaticHalo/density_profile.py
similarity index 100%
rename from examples/HydrostaticHalo/density_profile.py
rename to examples/GravityTests/HydrostaticHalo/density_profile.py
diff --git a/examples/HydrostaticHalo/hydrostatic.yml b/examples/GravityTests/HydrostaticHalo/hydrostatic.yml
similarity index 98%
rename from examples/HydrostaticHalo/hydrostatic.yml
rename to examples/GravityTests/HydrostaticHalo/hydrostatic.yml
index 0cc11d0d8708b518b8b0b3a8df1374b6a5ead7e2..874d6344cf5787bb310b6a1b730acb3455a8b6a6 100644
--- a/examples/HydrostaticHalo/hydrostatic.yml
+++ b/examples/GravityTests/HydrostaticHalo/hydrostatic.yml
@@ -31,7 +31,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Hydrostatic.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 IsothermalPotential:
   vrot:            200.     # rotation speed of isothermal potential in internal units
diff --git a/examples/HydrostaticHalo/internal_energy_profile.py b/examples/GravityTests/HydrostaticHalo/internal_energy_profile.py
similarity index 100%
rename from examples/HydrostaticHalo/internal_energy_profile.py
rename to examples/GravityTests/HydrostaticHalo/internal_energy_profile.py
diff --git a/examples/HydrostaticHalo/makeIC.py b/examples/GravityTests/HydrostaticHalo/makeIC.py
similarity index 98%
rename from examples/HydrostaticHalo/makeIC.py
rename to examples/GravityTests/HydrostaticHalo/makeIC.py
index d5081ac84473edc87857c6872278b4d0ca6389b1..b8a4036b77c430866f700047fd06bf2c8de490e7 100644
--- a/examples/HydrostaticHalo/makeIC.py
+++ b/examples/GravityTests/HydrostaticHalo/makeIC.py
@@ -91,10 +91,6 @@ grp.attrs["Unit current in cgs (U_I)"] = 1.
 grp.attrs["Unit temperature in cgs (U_T)"] = 1.
 
 
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 np.random.seed(1234)
 
diff --git a/examples/HydrostaticHalo/run.sh b/examples/GravityTests/HydrostaticHalo/run.sh
similarity index 88%
rename from examples/HydrostaticHalo/run.sh
rename to examples/GravityTests/HydrostaticHalo/run.sh
index 82584282559c1fceb0492aada671ff83fb74c924..a3f8b04b1115e316736c1177ecfd8288ed2a045e 100755
--- a/examples/HydrostaticHalo/run.sh
+++ b/examples/GravityTests/HydrostaticHalo/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run for 10 dynamical times
-../swift -g -s -t 1 hydrostatic.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --threads=1 hydrostatic.yml 2>&1 | tee output.log
 
 echo "Plotting density profiles"
 mkdir plots
diff --git a/examples/HydrostaticHalo/test_energy_conservation.py b/examples/GravityTests/HydrostaticHalo/test_energy_conservation.py
similarity index 100%
rename from examples/HydrostaticHalo/test_energy_conservation.py
rename to examples/GravityTests/HydrostaticHalo/test_energy_conservation.py
diff --git a/examples/HydrostaticHalo/velocity_profile.py b/examples/GravityTests/HydrostaticHalo/velocity_profile.py
similarity index 100%
rename from examples/HydrostaticHalo/velocity_profile.py
rename to examples/GravityTests/HydrostaticHalo/velocity_profile.py
diff --git a/examples/IsothermalPotential/README b/examples/GravityTests/IsothermalPotential/README
similarity index 100%
rename from examples/IsothermalPotential/README
rename to examples/GravityTests/IsothermalPotential/README
diff --git a/examples/IsothermalPotential/energy_plot.py b/examples/GravityTests/IsothermalPotential/energy_plot.py
similarity index 98%
rename from examples/IsothermalPotential/energy_plot.py
rename to examples/GravityTests/IsothermalPotential/energy_plot.py
index dab30715fbdaa0393f62c764ba552bbe4106325d..d157e4233cae2221f23d37f6bdf0c30a2486f972 100644
--- a/examples/IsothermalPotential/energy_plot.py
+++ b/examples/GravityTests/IsothermalPotential/energy_plot.py
@@ -86,7 +86,7 @@ for i in range(402):
 
     time_snap[i] = f["Header"].attrs["Time"]
     E_kin_snap[i] = np.sum(0.5 * mass * (vel_x[:]**2 + vel_y[:]**2 + vel_z[:]**2))
-    E_pot_snap[i] = np.sum(-mass * Vrot**2 *  log(r))
+    E_pot_snap[i] = np.sum(mass * Vrot**2 *  log(r))
     E_tot_snap[i] = E_kin_snap[i] + E_pot_snap[i]
     Lz_snap[i] = np.sum(Lz)
 
diff --git a/examples/IsothermalPotential/isothermal.yml b/examples/GravityTests/IsothermalPotential/isothermal.yml
similarity index 90%
rename from examples/IsothermalPotential/isothermal.yml
rename to examples/GravityTests/IsothermalPotential/isothermal.yml
index 5f626ff72e979ad0f3d404e01002be6b6018c758..4f8d98a1f7615659ddb3c922b149fc2db04415c6 100644
--- a/examples/IsothermalPotential/isothermal.yml
+++ b/examples/GravityTests/IsothermalPotential/isothermal.yml
@@ -26,10 +26,13 @@ Snapshots:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  Isothermal.hdf5       # The file to read
+  periodic:   1
   shift:      [200.,200.,200.]      # Shift all particles to be in the potential
  
 # External potential parameters
 IsothermalPotential:
+  useabspos:       0          # Whether to use absolute position (1) or relative potential to centre of box (0)
+  position:        [0.,0.,0.]
   vrot:            200.       # rotation speed of isothermal potential in internal units
   timestep_mult:   0.01       # controls time step
   epsilon:         0.         # No softening at the centre of the halo
diff --git a/examples/IsothermalPotential/makeIC.py b/examples/GravityTests/IsothermalPotential/makeIC.py
similarity index 97%
rename from examples/IsothermalPotential/makeIC.py
rename to examples/GravityTests/IsothermalPotential/makeIC.py
index eab16d21e6a4abd077dc0f4a015a4577427a3591..ebcbb6dda11f1a2d88dfcfb717578f114f3512e9 100644
--- a/examples/IsothermalPotential/makeIC.py
+++ b/examples/GravityTests/IsothermalPotential/makeIC.py
@@ -97,10 +97,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 # set seed for random number
 numpy.random.seed(1234)
 
diff --git a/examples/IsothermalPotential/run.sh b/examples/GravityTests/IsothermalPotential/run.sh
similarity index 77%
rename from examples/IsothermalPotential/run.sh
rename to examples/GravityTests/IsothermalPotential/run.sh
index a5f03f32f82e27660d0a950335d731cf0ff7401d..4e6a502eddcc081549bc1c967cde9edab9f0b835 100755
--- a/examples/IsothermalPotential/run.sh
+++ b/examples/GravityTests/IsothermalPotential/run.sh
@@ -8,6 +8,6 @@ then
 fi
 
 rm -rf Isothermal_*.hdf5
-../swift -g -t 1 isothermal.yml 2>&1 | tee output.log
+../../swift --external-gravity --threads=1 isothermal.yml 2>&1 | tee output.log
 
 python energy_plot.py
diff --git a/examples/GravityTests/NFW_Halo/README b/examples/GravityTests/NFW_Halo/README
new file mode 100755
index 0000000000000000000000000000000000000000..059d35c9a94d7851233dd0fa423abca3a1d7cddf
--- /dev/null
+++ b/examples/GravityTests/NFW_Halo/README
@@ -0,0 +1,5 @@
+This just provides a test that the NFW potential is giving the correct orbit 
+for an elliptical orbit as calculated by Jo Bovy's galpy package. If 
+galpy is not installed on your system you can install it by using:
+pip install galpy --user
+
diff --git a/examples/GravityTests/NFW_Halo/makeIC.py b/examples/GravityTests/NFW_Halo/makeIC.py
new file mode 100755
index 0000000000000000000000000000000000000000..68d8108f84aa759fe16956226122d53765c5ed1d
--- /dev/null
+++ b/examples/GravityTests/NFW_Halo/makeIC.py
@@ -0,0 +1,75 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Ashley Kelly ()
+#                    Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+import numpy as np
+import matplotlib.pyplot as plt
+from astropy import units
+import h5py as h5
+
+C = 8.0
+M_200 = 2.0
+N_PARTICLES = 1
+
+
+print("\nInitial conditions written to 'test_nfw.hdf5'")
+
+pos = np.array([8.0, 0.0, 0.0]) + 500.0
+vel = np.array([0.0, 240.0, 5.0])
+ids = np.array([1.0])
+mass = np.array([1.0])
+
+# File
+file = h5.File("test_nfw.hdf5", "w")
+
+# Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = 3.086e21
+grp.attrs["Unit mass in cgs (U_M)"] = 1.988e33
+grp.attrs["Unit time in cgs (U_t)"] = 3.086e16
+grp.attrs["Unit current in cgs (U_I)"] = 1.0
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.0
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = 1000.0
+grp.attrs["NumPart_Total"] = [0, N_PARTICLES, 0, 0, 0, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [0, N_PARTICLES, 0, 0, 0, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["Dimension"] = 3
+
+# Runtime parameters
+grp = file.create_group("/RuntimePars")
+grp.attrs["PeriodicBoundariesOn"] = 1
+
+# Particle group
+grp1 = file.create_group("/PartType1")
+ds = grp1.create_dataset("Velocities", (N_PARTICLES, 3), "f", data=vel)
+
+ds = grp1.create_dataset("Masses", (N_PARTICLES,), "f", data=mass)
+
+ds = grp1.create_dataset("ParticleIDs", (N_PARTICLES,), "L", data=ids)
+
+ds = grp1.create_dataset("Coordinates", (N_PARTICLES, 3), "d", data=pos)
+
+file.close()
diff --git a/examples/GravityTests/NFW_Halo/makePlots.py b/examples/GravityTests/NFW_Halo/makePlots.py
new file mode 100755
index 0000000000000000000000000000000000000000..5e6f24d7a72dafe47d26ccb1b2d33b136affad98
--- /dev/null
+++ b/examples/GravityTests/NFW_Halo/makePlots.py
@@ -0,0 +1,73 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Ashley Kelly ()
+#                    Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+from galpy.potential import NFWPotential
+from galpy.orbit import Orbit
+import numpy as np
+import matplotlib.pyplot as plt
+from astropy import units
+import h5py as h5
+
+C = 8.0
+M_200 = 2.0
+
+
+def read_data():
+    R = np.array([])
+    z = np.array([])
+    for frame in range(0, 599, 1):
+        try:
+            sim = h5.File("output_%04d.hdf5" % frame, "r")
+        except IOError:
+            break
+
+        boxSize = sim["/Header"].attrs["BoxSize"][0]
+        pos = sim["/PartType1/Coordinates"][:, :] - boxSize / 2.0
+        R = np.append(R, np.sqrt(pos[0, 0] ** 2 + pos[0, 1] ** 2))
+        z = np.append(z, pos[0, 2])
+    return (R, z)
+
+
+def galpy_nfw_orbit():
+    # Setting up the potential
+    nfw = NFWPotential(conc=C, mvir=M_200, H=70.0, wrtcrit=True, overdens=200)
+    nfw.turn_physical_on()
+    vxvv = [
+        8.0 * units.kpc,
+        0.0 * units.km / units.s,
+        240.0 * units.km / units.s,
+        0.0 * units.pc,
+        5.0 * units.km / units.s,
+    ]
+
+    # Calculating the orbit
+    ts = np.linspace(0.0, 0.58, 1000) * units.Gyr
+    o = Orbit(vxvv=vxvv)
+    o.integrate(ts, nfw, method="odeint")
+
+    return o
+
+
+o = galpy_nfw_orbit()
+(R, z) = read_data()
+
+o.plot()
+plt.scatter(R, z, s=1, color="black", marker="x")
+plt.savefig("comparison.png")
+plt.close()
diff --git a/examples/GravityTests/NFW_Halo/run.sh b/examples/GravityTests/NFW_Halo/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7f16def75f760d2e4cef4a9303b7c370ebdd1916
--- /dev/null
+++ b/examples/GravityTests/NFW_Halo/run.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+if [ ! -e test_nfw.hdf5 ]
+then
+    echo "Generate initial conditions for NFW example"	
+    if command -v python3 &>/dev/null; then
+        python3 makeIC.py
+    else 
+        python makeIC.py
+    fi
+fi
+
+# Run SWIFT with the external NFW potential, using 6 threads
+../../swift --external-gravity --threads=6 test.yml 2>&1 | tee output.log
+
+if command -v python3 &>/dev/null; then
+    python3 makePlots.py
+else 
+    python makePlots.py
+fi
diff --git a/examples/GravityTests/NFW_Halo/test.yml b/examples/GravityTests/NFW_Halo/test.yml
new file mode 100755
index 0000000000000000000000000000000000000000..73831af30769942bd7aa1c89bd7464025d2ddc85
--- /dev/null
+++ b/examples/GravityTests/NFW_Halo/test.yml
@@ -0,0 +1,41 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.988e+33 # Solar mass
+  UnitLength_in_cgs:   3.086e+21 # kpc
+  UnitVelocity_in_cgs: 1e5       # km / s
+  UnitCurrent_in_cgs:  1         # Amperes
+  UnitTemp_in_cgs:     1         # Kelvin
+
+# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
+TimeIntegration:
+  time_begin:          0.      # The starting time of the simulation (in internal units).
+  time_end:            0.6     # The end time of the simulation (in internal units).
+  dt_min:              1e-8    # The minimal time-step size of the simulation (in internal units).
+  dt_max:              1e-1    # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            output  # Common part of the name of output files
+  time_first:          0.      # Time of the first output (in internal units)
+  delta_time:          1e-3    # Time difference between consecutive outputs (in internal units)
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1e-3    # Time between statistics output
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:          test_nfw.hdf5 # The file to read
+  shift_x:            0.          # (Optional) A shift to apply to all particles read from the ICs (in internal units).
+  shift_y:            0.
+  shift_z:            0.
+  periodic:           0
+
+# NFW potential parameters
+NFWPotential:
+  useabspos:          0
+  position:           [0.0,0.0,0.0]      # Location of centre of potential with respect to centre of the box (internal units)
+  concentration:      8.
+  M_200:              2.0e+12  # Virial mass (internal units)
+  critical_density:   140      # Critical density (internal units)
+  timestep_mult:      0.01     # Dimensionless pre-factor for the time-step condition
diff --git a/examples/EvrardCollapse_3D/evrard.yml b/examples/HydroTests/EvrardCollapse_3D/evrard.yml
similarity index 99%
rename from examples/EvrardCollapse_3D/evrard.yml
rename to examples/HydroTests/EvrardCollapse_3D/evrard.yml
index f9a4e69f72e6bb19b818cb985ef92122b1a10b2a..c14f9151b5a4ba6af60307a689d5b2530068deb3 100644
--- a/examples/EvrardCollapse_3D/evrard.yml
+++ b/examples/HydroTests/EvrardCollapse_3D/evrard.yml
@@ -39,6 +39,7 @@ Gravity:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./evrard.hdf5       # The file to read
-
+  periodic:   0
+  
 PhysicalConstants:
   G: 1.
diff --git a/examples/EvrardCollapse_3D/getReference.sh b/examples/HydroTests/EvrardCollapse_3D/getReference.sh
similarity index 100%
rename from examples/EvrardCollapse_3D/getReference.sh
rename to examples/HydroTests/EvrardCollapse_3D/getReference.sh
diff --git a/examples/EvrardCollapse_3D/makeIC.py b/examples/HydroTests/EvrardCollapse_3D/makeIC.py
similarity index 97%
rename from examples/EvrardCollapse_3D/makeIC.py
rename to examples/HydroTests/EvrardCollapse_3D/makeIC.py
index f4d3c4c5bf7f91e5f79cfcd4e9ae23388932144e..29c4acd69ebf0638edf1273efc0f411766aebb6d 100644
--- a/examples/EvrardCollapse_3D/makeIC.py
+++ b/examples/HydroTests/EvrardCollapse_3D/makeIC.py
@@ -86,10 +86,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 0
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/EvrardCollapse_3D/plotSolution.py b/examples/HydroTests/EvrardCollapse_3D/plotSolution.py
similarity index 100%
rename from examples/EvrardCollapse_3D/plotSolution.py
rename to examples/HydroTests/EvrardCollapse_3D/plotSolution.py
diff --git a/examples/EvrardCollapse_3D/run.sh b/examples/HydroTests/EvrardCollapse_3D/run.sh
similarity index 85%
rename from examples/EvrardCollapse_3D/run.sh
rename to examples/HydroTests/EvrardCollapse_3D/run.sh
index abb7614f66fc877aa670db9b0e1335fbfe2e85d2..ae02bcc0baed1aff87c3866b77075f6cb0f89a27 100755
--- a/examples/EvrardCollapse_3D/run.sh
+++ b/examples/HydroTests/EvrardCollapse_3D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -G -t 4 evrard.yml 2>&1 | tee output.log
+../../swift --hydro --self-gravity --threads=4 evrard.yml 2>&1 | tee output.log
 
 # Get the high resolution 1D reference result if not present.
 if [ ! -e evrardCollapse3D_exact.txt ]
diff --git a/examples/Gradients/gradientsCartesian.yml b/examples/HydroTests/Gradients/gradientsCartesian.yml
similarity index 98%
rename from examples/Gradients/gradientsCartesian.yml
rename to examples/HydroTests/Gradients/gradientsCartesian.yml
index b2131bdd4d3a9242d30ff0f32b7bf3395cb433a8..0264e9ced8652f45feeba79573d3143e6b0086bb 100644
--- a/examples/Gradients/gradientsCartesian.yml
+++ b/examples/HydroTests/Gradients/gradientsCartesian.yml
@@ -31,4 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./Gradients_cartesian.hdf5       # The file to read
-
+  periodic:   1
diff --git a/examples/Gradients/gradientsRandom.yml b/examples/HydroTests/Gradients/gradientsRandom.yml
similarity index 98%
rename from examples/Gradients/gradientsRandom.yml
rename to examples/HydroTests/Gradients/gradientsRandom.yml
index 57ae849898bf8ccd63ccd7a5d685f9690403403d..1c6fcc1d077e0fd260b42e7de77490d58fb5aea9 100644
--- a/examples/Gradients/gradientsRandom.yml
+++ b/examples/HydroTests/Gradients/gradientsRandom.yml
@@ -31,4 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./Gradients_random.hdf5       # The file to read
-
+  periodic:   1
diff --git a/examples/Gradients/gradientsStretched.yml b/examples/HydroTests/Gradients/gradientsStretched.yml
similarity index 100%
rename from examples/Gradients/gradientsStretched.yml
rename to examples/HydroTests/Gradients/gradientsStretched.yml
diff --git a/examples/Gradients/makeICs.py b/examples/HydroTests/Gradients/makeICs.py
similarity index 97%
rename from examples/Gradients/makeICs.py
rename to examples/HydroTests/Gradients/makeICs.py
index 38d035d2ad2dd3dd6daacfd6f58d824e9daf6742..be70a9e614e8bc32db0c0979c42ab892ef7d068f 100644
--- a/examples/Gradients/makeICs.py
+++ b/examples/HydroTests/Gradients/makeICs.py
@@ -26,7 +26,6 @@ import sys
 # reconstruction
 
 # Parameters
-periodic= 1      # 1 For periodic box
 gamma = 5./3.     # Gas adiabatic index
 gridtype = "cartesian"
 if len(sys.argv) > 1:
@@ -153,10 +152,6 @@ grp.attrs["NumFilesPerSnapshot"] = 1
 grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Particle group
 grp = file.create_group("/PartType0")
 ds = grp.create_dataset('Coordinates', (npart, 3), 'd')
diff --git a/examples/Gradients/plot.py b/examples/HydroTests/Gradients/plot.py
similarity index 100%
rename from examples/Gradients/plot.py
rename to examples/HydroTests/Gradients/plot.py
diff --git a/examples/Gradients/run.sh b/examples/HydroTests/Gradients/run.sh
similarity index 61%
rename from examples/Gradients/run.sh
rename to examples/HydroTests/Gradients/run.sh
index 44c25ac5695175c40483d9f8b3bbd160b2fcbc0a..86ee9a68389319b1cb5f7327e8bd689b6212e6c1 100755
--- a/examples/Gradients/run.sh
+++ b/examples/HydroTests/Gradients/run.sh
@@ -1,13 +1,13 @@
 #! /bin/bash
 
 python makeICs.py stretched
-../swift -s -t 2 gradientsStretched.yml
+../../swift --hydro --threads=2 gradientsStretched.yml
 python plot.py gradients_stretched_0001.hdf5 stretched
 
 python makeICs.py cartesian
-../swift -s -t 2 gradientsCartesian.yml
+../../swift --hydro --threads=2 gradientsCartesian.yml
 python plot.py gradients_cartesian_0001.hdf5 cartesian
 
 python makeICs.py random
-../swift -s -t 2 gradientsRandom.yml
+../../swift --hydro --threads=2 gradientsRandom.yml
 python plot.py gradients_random_0001.hdf5 random
diff --git a/examples/GreshoVortex_2D/getGlass.sh b/examples/HydroTests/GreshoVortex_2D/getGlass.sh
similarity index 100%
rename from examples/GreshoVortex_2D/getGlass.sh
rename to examples/HydroTests/GreshoVortex_2D/getGlass.sh
diff --git a/examples/GreshoVortex_2D/gresho.yml b/examples/HydroTests/GreshoVortex_2D/gresho.yml
similarity index 99%
rename from examples/GreshoVortex_2D/gresho.yml
rename to examples/HydroTests/GreshoVortex_2D/gresho.yml
index df941450196a7de6cd1471e1d258756ca8c36fb1..2006bb451179ce646ec2cc41cb3aa5603489dc29 100644
--- a/examples/GreshoVortex_2D/gresho.yml
+++ b/examples/HydroTests/GreshoVortex_2D/gresho.yml
@@ -34,3 +34,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./greshoVortex.hdf5     # The file to read
+  periodic:   1
\ No newline at end of file
diff --git a/examples/GreshoVortex_2D/makeIC.py b/examples/HydroTests/GreshoVortex_2D/makeIC.py
similarity index 97%
rename from examples/GreshoVortex_2D/makeIC.py
rename to examples/HydroTests/GreshoVortex_2D/makeIC.py
index 4f4ec3407b04971882fbf3d7d7479e74bf56c762..4fb382925e41a1d00463b369bc8d95c4bc6b0aa1 100644
--- a/examples/GreshoVortex_2D/makeIC.py
+++ b/examples/HydroTests/GreshoVortex_2D/makeIC.py
@@ -89,10 +89,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/GreshoVortex_2D/plotSolution.py b/examples/HydroTests/GreshoVortex_2D/plotSolution.py
similarity index 100%
rename from examples/GreshoVortex_2D/plotSolution.py
rename to examples/HydroTests/GreshoVortex_2D/plotSolution.py
diff --git a/examples/GreshoVortex_2D/run.sh b/examples/HydroTests/GreshoVortex_2D/run.sh
similarity index 86%
rename from examples/GreshoVortex_2D/run.sh
rename to examples/HydroTests/GreshoVortex_2D/run.sh
index 6d537bcc96c68385434f685bd551a2d423f469e0..0e24112a0faafcd38a06216494f87888f3e132e2 100755
--- a/examples/GreshoVortex_2D/run.sh
+++ b/examples/HydroTests/GreshoVortex_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 gresho.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 gresho.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 11
diff --git a/examples/GreshoVortex_3D/getGlass.sh b/examples/HydroTests/GreshoVortex_3D/getGlass.sh
similarity index 100%
rename from examples/GreshoVortex_3D/getGlass.sh
rename to examples/HydroTests/GreshoVortex_3D/getGlass.sh
diff --git a/examples/GreshoVortex_3D/gresho.yml b/examples/HydroTests/GreshoVortex_3D/gresho.yml
similarity index 99%
rename from examples/GreshoVortex_3D/gresho.yml
rename to examples/HydroTests/GreshoVortex_3D/gresho.yml
index 113c03b9bd0e411bf04f29c70937ac7fab3708f3..a95a0eae3255b87337fc838f1eabe5469a724a09 100644
--- a/examples/GreshoVortex_3D/gresho.yml
+++ b/examples/HydroTests/GreshoVortex_3D/gresho.yml
@@ -35,3 +35,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./greshoVortex.hdf5     # The file to read
+  periodic:   1
\ No newline at end of file
diff --git a/examples/GreshoVortex_3D/makeIC.py b/examples/HydroTests/GreshoVortex_3D/makeIC.py
similarity index 97%
rename from examples/GreshoVortex_3D/makeIC.py
rename to examples/HydroTests/GreshoVortex_3D/makeIC.py
index cba2158016bc86f58b6e89f83cbfb473798e1cf7..03f99df1082928bd57779ff2c0e7e85f112b4f1f 100644
--- a/examples/GreshoVortex_3D/makeIC.py
+++ b/examples/HydroTests/GreshoVortex_3D/makeIC.py
@@ -90,10 +90,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/GreshoVortex_3D/plotSolution.py b/examples/HydroTests/GreshoVortex_3D/plotSolution.py
similarity index 100%
rename from examples/GreshoVortex_3D/plotSolution.py
rename to examples/HydroTests/GreshoVortex_3D/plotSolution.py
diff --git a/examples/GreshoVortex_3D/run.sh b/examples/HydroTests/GreshoVortex_3D/run.sh
similarity index 86%
rename from examples/GreshoVortex_3D/run.sh
rename to examples/HydroTests/GreshoVortex_3D/run.sh
index da7d6cee111aebcfd2fcb0f3508af80ef73cbeb0..15b613782e685d86321460b33c52fe9109230840 100755
--- a/examples/GreshoVortex_3D/run.sh
+++ b/examples/HydroTests/GreshoVortex_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 gresho.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 gresho.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 11
diff --git a/examples/InteractingBlastWaves_1D/getReference.sh b/examples/HydroTests/InteractingBlastWaves_1D/getReference.sh
similarity index 100%
rename from examples/InteractingBlastWaves_1D/getReference.sh
rename to examples/HydroTests/InteractingBlastWaves_1D/getReference.sh
diff --git a/examples/InteractingBlastWaves_1D/interactingBlastWaves.yml b/examples/HydroTests/InteractingBlastWaves_1D/interactingBlastWaves.yml
similarity index 98%
rename from examples/InteractingBlastWaves_1D/interactingBlastWaves.yml
rename to examples/HydroTests/InteractingBlastWaves_1D/interactingBlastWaves.yml
index e845599730828fd7b9880ae9aca11420ba50026c..c4960dfa2c07b6b08cd6559b1de49f27b518bf94 100644
--- a/examples/InteractingBlastWaves_1D/interactingBlastWaves.yml
+++ b/examples/HydroTests/InteractingBlastWaves_1D/interactingBlastWaves.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./interactingBlastWaves.hdf5          # The file to read
+  periodic:   1
diff --git a/examples/InteractingBlastWaves_1D/makeIC.py b/examples/HydroTests/InteractingBlastWaves_1D/makeIC.py
similarity index 96%
rename from examples/InteractingBlastWaves_1D/makeIC.py
rename to examples/HydroTests/InteractingBlastWaves_1D/makeIC.py
index bed0e20c833ccbe54ed571b954cad03ab93f4c0c..3a47bf7c42e1359dc1a9aa151e360ad0f93d2d32 100644
--- a/examples/InteractingBlastWaves_1D/makeIC.py
+++ b/examples/HydroTests/InteractingBlastWaves_1D/makeIC.py
@@ -62,10 +62,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/InteractingBlastWaves_1D/plotSolution.py b/examples/HydroTests/InteractingBlastWaves_1D/plotSolution.py
similarity index 100%
rename from examples/InteractingBlastWaves_1D/plotSolution.py
rename to examples/HydroTests/InteractingBlastWaves_1D/plotSolution.py
diff --git a/examples/InteractingBlastWaves_1D/run.sh b/examples/HydroTests/InteractingBlastWaves_1D/run.sh
similarity index 85%
rename from examples/InteractingBlastWaves_1D/run.sh
rename to examples/HydroTests/InteractingBlastWaves_1D/run.sh
index 31717bd806ddd6c98c24dfc1def6f79dddff42ff..42034d5e541c4e038a9284e88651cb6a9fa9013f 100755
--- a/examples/InteractingBlastWaves_1D/run.sh
+++ b/examples/HydroTests/InteractingBlastWaves_1D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 interactingBlastWaves.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 interactingBlastWaves.yml 2>&1 | tee output.log
 
 # Get the high resolution reference solution if not present.
 if [ ! -e interactingBlastWaves1D_exact.txt ]
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/getGlass.sh b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/getGlass.sh
similarity index 100%
rename from examples/KelvinHelmholtzGrowthRate_2D/getGlass.sh
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/getGlass.sh
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml
similarity index 98%
rename from examples/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml
index 380dc2ab3a530e89b952aa41f425e50709d73ee9..e5a46cca1aa0c8972a5427126d2ce57a26d1b262 100644
--- a/examples/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/kelvinHelmholtzGrowthRate.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./kelvinHelmholtzGrowthRate.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/makeIC.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC.py
similarity index 97%
rename from examples/KelvinHelmholtzGrowthRate_2D/makeIC.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC.py
index f21d0c0abf9b15f8253f627bcb1da43ae276fb35..25ef65fd758e0dd97d45732a2da6d2aa19f793bc 100644
--- a/examples/KelvinHelmholtzGrowthRate_2D/makeIC.py
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC.py
@@ -76,10 +76,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py
similarity index 97%
rename from examples/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py
index 5029165a6a328b6c706d37b632b14cbcd51501d0..55cd17823a1101164191c89810029370dee21e26 100644
--- a/examples/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/makeIC_regular.py
@@ -82,10 +82,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/plotSolution.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/plotSolution.py
similarity index 100%
rename from examples/KelvinHelmholtzGrowthRate_2D/plotSolution.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/plotSolution.py
diff --git a/examples/KelvinHelmholtzGrowthRate_2D/run.sh b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/run.sh
similarity index 78%
rename from examples/KelvinHelmholtzGrowthRate_2D/run.sh
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_2D/run.sh
index 3e6e026f66b14846a5c6e8e9daf99797dc3ff87a..4e565a2de588c53f54c928b3b9f7dfec483c8220 100755
--- a/examples/KelvinHelmholtzGrowthRate_2D/run.sh
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_2D/run.sh
@@ -9,7 +9,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 kelvinHelmholtzGrowthRate.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 kelvinHelmholtzGrowthRate.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 100
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/getGlass.sh b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/getGlass.sh
similarity index 100%
rename from examples/KelvinHelmholtzGrowthRate_3D/getGlass.sh
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/getGlass.sh
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml
similarity index 98%
rename from examples/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml
index e39c01645b766ae585558452683dc8e1bdf425a8..f5f7157f7d3252e8fe256b7bfc4ba83cb09ef03e 100644
--- a/examples/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/kelvinHelmholtzGrowthRate.yml
@@ -32,3 +32,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./kelvinHelmholtzGrowthRate.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/makeIC.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC.py
similarity index 97%
rename from examples/KelvinHelmholtzGrowthRate_3D/makeIC.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC.py
index a9bc20559b9fbb5da400ba5de2563cd715f473d5..d28f3617214193eca6159a7220263d36500dd1aa 100644
--- a/examples/KelvinHelmholtzGrowthRate_3D/makeIC.py
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC.py
@@ -76,10 +76,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py
similarity index 97%
rename from examples/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py
index aa7dd8f214f8ece1c1d142bf02bd653cd35f9973..51ab694f387d380c83a0b646696fd23111b3f98c 100644
--- a/examples/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/makeIC_regular.py
@@ -84,10 +84,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/plotSolution.py b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/plotSolution.py
similarity index 100%
rename from examples/KelvinHelmholtzGrowthRate_3D/plotSolution.py
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/plotSolution.py
diff --git a/examples/KelvinHelmholtzGrowthRate_3D/run.sh b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/run.sh
similarity index 78%
rename from examples/KelvinHelmholtzGrowthRate_3D/run.sh
rename to examples/HydroTests/KelvinHelmholtzGrowthRate_3D/run.sh
index 3e6e026f66b14846a5c6e8e9daf99797dc3ff87a..4e565a2de588c53f54c928b3b9f7dfec483c8220 100755
--- a/examples/KelvinHelmholtzGrowthRate_3D/run.sh
+++ b/examples/HydroTests/KelvinHelmholtzGrowthRate_3D/run.sh
@@ -9,7 +9,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 kelvinHelmholtzGrowthRate.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 kelvinHelmholtzGrowthRate.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 100
diff --git a/examples/KelvinHelmholtz_2D/kelvinHelmholtz.yml b/examples/HydroTests/KelvinHelmholtz_2D/kelvinHelmholtz.yml
similarity index 98%
rename from examples/KelvinHelmholtz_2D/kelvinHelmholtz.yml
rename to examples/HydroTests/KelvinHelmholtz_2D/kelvinHelmholtz.yml
index ccc7526b391374a4da0883f6615a65c7b93a0948..6e4e2bd43cfa3def8386b85c84570e9b9a48fbcf 100644
--- a/examples/KelvinHelmholtz_2D/kelvinHelmholtz.yml
+++ b/examples/HydroTests/KelvinHelmholtz_2D/kelvinHelmholtz.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./kelvinHelmholtz.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/KelvinHelmholtz_2D/makeIC.py b/examples/HydroTests/KelvinHelmholtz_2D/makeIC.py
similarity index 97%
rename from examples/KelvinHelmholtz_2D/makeIC.py
rename to examples/HydroTests/KelvinHelmholtz_2D/makeIC.py
index 744b39de8260720521ae8e77ed5d0a12161f2b6a..919066955c519dbac4e78e8e2a0eece842c40ab3 100644
--- a/examples/KelvinHelmholtz_2D/makeIC.py
+++ b/examples/HydroTests/KelvinHelmholtz_2D/makeIC.py
@@ -122,10 +122,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/KelvinHelmholtz_2D/makeMovie.py b/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py
similarity index 98%
rename from examples/KelvinHelmholtz_2D/makeMovie.py
rename to examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py
index 84fe99106bf607830e89b6aa663135b48b6c0744..a52784891ab4689dcd59dc27945e573e602785f3 100644
--- a/examples/KelvinHelmholtz_2D/makeMovie.py
+++ b/examples/HydroTests/KelvinHelmholtz_2D/makeMovie.py
@@ -91,6 +91,7 @@ if __name__ == "__main__":
 
     # Creation of first frame
     fig, ax = plt.subplots(1, 1, figsize=(1, 1), frameon=False)
+    ax.axis("off")  # Remove annoying black frame.
 
     data_x, data_y, density = load_and_extract("kelvinhelmholtz_0000.hdf5")
 
diff --git a/examples/KelvinHelmholtz_2D/plotSolution.py b/examples/HydroTests/KelvinHelmholtz_2D/plotSolution.py
similarity index 100%
rename from examples/KelvinHelmholtz_2D/plotSolution.py
rename to examples/HydroTests/KelvinHelmholtz_2D/plotSolution.py
diff --git a/examples/KelvinHelmholtz_2D/run.sh b/examples/HydroTests/KelvinHelmholtz_2D/run.sh
similarity index 79%
rename from examples/KelvinHelmholtz_2D/run.sh
rename to examples/HydroTests/KelvinHelmholtz_2D/run.sh
index dbb39caf383279dbc71c2baa125499d115538654..355bf052a7ad124bcb4d88254ad780a7ffa97aba 100755
--- a/examples/KelvinHelmholtz_2D/run.sh
+++ b/examples/HydroTests/KelvinHelmholtz_2D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 kelvinHelmholtz.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 kelvinHelmholtz.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 6
diff --git a/examples/KeplerianRing/README.md b/examples/HydroTests/KeplerianRing/README.md
similarity index 97%
rename from examples/KeplerianRing/README.md
rename to examples/HydroTests/KeplerianRing/README.md
index 1c361f275d60ef1ca46d696e2e9507bb749e531c..1cb2e2119d0f0bb093abf194ab18da91dd587d32 100644
--- a/examples/KeplerianRing/README.md
+++ b/examples/HydroTests/KeplerianRing/README.md
@@ -69,7 +69,7 @@ Plotting
 
 Once you have ran swift (we suggest that you use the following)
 
-    ../swift -g -S -s -t 16 keplerian_ring.yml 2>&1 | tee output.log
+    ../swift --external-gravity --stars --hydro --threads=16 keplerian_ring.yml 2>&1 | tee output.log
 
 there will be around 350 ```.hdf5``` files in your directory. To check out
 the results of the example use the plotting script:
diff --git a/examples/KeplerianRing/keplerian_ring.yml b/examples/HydroTests/KeplerianRing/keplerian_ring.yml
similarity index 99%
rename from examples/KeplerianRing/keplerian_ring.yml
rename to examples/HydroTests/KeplerianRing/keplerian_ring.yml
index cc5db2a06adbe9678207454c6504a6fa315675cf..2195acfb55121ff595c471ad146b40752d9aa84e 100644
--- a/examples/KeplerianRing/keplerian_ring.yml
+++ b/examples/HydroTests/KeplerianRing/keplerian_ring.yml
@@ -32,7 +32,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  initial_conditions.hdf5        # The file to read
-
+  periodic:   1
+  
 # External potential parameters
 PointMassPotential:
   position:        [5.,5.,5.]  # location of external point mass in internal units
diff --git a/examples/KeplerianRing/makeIC.py b/examples/HydroTests/KeplerianRing/makeIC.py
similarity index 100%
rename from examples/KeplerianRing/makeIC.py
rename to examples/HydroTests/KeplerianRing/makeIC.py
diff --git a/examples/KeplerianRing/make_movie.py b/examples/HydroTests/KeplerianRing/make_movie.py
similarity index 100%
rename from examples/KeplerianRing/make_movie.py
rename to examples/HydroTests/KeplerianRing/make_movie.py
diff --git a/examples/KeplerianRing/plotSolution.py b/examples/HydroTests/KeplerianRing/plotSolution.py
similarity index 100%
rename from examples/KeplerianRing/plotSolution.py
rename to examples/HydroTests/KeplerianRing/plotSolution.py
diff --git a/examples/KeplerianRing/run.sh b/examples/HydroTests/KeplerianRing/run.sh
similarity index 79%
rename from examples/KeplerianRing/run.sh
rename to examples/HydroTests/KeplerianRing/run.sh
index 0195846a8839a27083594c20569b1fd4d49f4c16..b11a0c1e52f2792447ffe39efbdf5c7b2ddda437 100755
--- a/examples/KeplerianRing/run.sh
+++ b/examples/HydroTests/KeplerianRing/run.sh
@@ -9,7 +9,7 @@ then
 fi
 
 rm -rf keplerian_ring_*.hdf5
-../swift -g -s -t 1 -v 1 keplerian_ring.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --threads=1 --verbose=1 keplerian_ring.yml 2>&1 | tee output.log
 
 echo
 echo
diff --git a/examples/KeplerianRing/testplots.py b/examples/HydroTests/KeplerianRing/testplots.py
similarity index 100%
rename from examples/KeplerianRing/testplots.py
rename to examples/HydroTests/KeplerianRing/testplots.py
diff --git a/examples/KeplerianRing/write_gadget.py b/examples/HydroTests/KeplerianRing/write_gadget.py
similarity index 100%
rename from examples/KeplerianRing/write_gadget.py
rename to examples/HydroTests/KeplerianRing/write_gadget.py
diff --git a/examples/Noh_1D/makeIC.py b/examples/HydroTests/Noh_1D/makeIC.py
similarity index 96%
rename from examples/Noh_1D/makeIC.py
rename to examples/HydroTests/Noh_1D/makeIC.py
index 176f3517455db7a8b0994ac7d1e65fb9cb7419d4..9d9a5e5b62edeedd8f5b2732c240b9ea2878c92d 100644
--- a/examples/Noh_1D/makeIC.py
+++ b/examples/HydroTests/Noh_1D/makeIC.py
@@ -66,10 +66,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Noh_2D/noh.yml b/examples/HydroTests/Noh_1D/noh.yml
similarity index 98%
rename from examples/Noh_2D/noh.yml
rename to examples/HydroTests/Noh_1D/noh.yml
index 1d126f19babd0c9fe28afff907b3fe8259467a24..58e13ddda8939c8fc5fa4360a498a87f1c5b189a 100644
--- a/examples/Noh_2D/noh.yml
+++ b/examples/HydroTests/Noh_1D/noh.yml
@@ -31,4 +31,6 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./noh.hdf5          # The file to read
+  periodic:   1
 
+  
\ No newline at end of file
diff --git a/examples/Noh_1D/plotSolution.py b/examples/HydroTests/Noh_1D/plotSolution.py
similarity index 100%
rename from examples/Noh_1D/plotSolution.py
rename to examples/HydroTests/Noh_1D/plotSolution.py
diff --git a/examples/Noh_1D/run.sh b/examples/HydroTests/Noh_1D/run.sh
similarity index 79%
rename from examples/Noh_1D/run.sh
rename to examples/HydroTests/Noh_1D/run.sh
index 77788bfa8429e2fbf0502068baa70598acaaa791..0a7bd0574c19428a7f82141e619aed1f49e677be 100755
--- a/examples/Noh_1D/run.sh
+++ b/examples/HydroTests/Noh_1D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 noh.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 noh.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 12
diff --git a/examples/Noh_2D/getGlass.sh b/examples/HydroTests/Noh_2D/getGlass.sh
similarity index 100%
rename from examples/Noh_2D/getGlass.sh
rename to examples/HydroTests/Noh_2D/getGlass.sh
diff --git a/examples/Noh_2D/makeIC.py b/examples/HydroTests/Noh_2D/makeIC.py
similarity index 96%
rename from examples/Noh_2D/makeIC.py
rename to examples/HydroTests/Noh_2D/makeIC.py
index f7239fa3cd188637df929f86451d20a9978bd1f5..83bb1ac6773074d0c10d3eb425b34c082a971fd8 100644
--- a/examples/Noh_2D/makeIC.py
+++ b/examples/HydroTests/Noh_2D/makeIC.py
@@ -73,10 +73,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Noh_1D/noh.yml b/examples/HydroTests/Noh_2D/noh.yml
similarity index 98%
rename from examples/Noh_1D/noh.yml
rename to examples/HydroTests/Noh_2D/noh.yml
index 1d126f19babd0c9fe28afff907b3fe8259467a24..eaf991631854e9a9781f0fcee50d996f8af949cd 100644
--- a/examples/Noh_1D/noh.yml
+++ b/examples/HydroTests/Noh_2D/noh.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./noh.hdf5          # The file to read
+  periodic:   1
 
diff --git a/examples/Noh_2D/plotSolution.py b/examples/HydroTests/Noh_2D/plotSolution.py
similarity index 100%
rename from examples/Noh_2D/plotSolution.py
rename to examples/HydroTests/Noh_2D/plotSolution.py
diff --git a/examples/Noh_2D/run.sh b/examples/HydroTests/Noh_2D/run.sh
similarity index 85%
rename from examples/Noh_2D/run.sh
rename to examples/HydroTests/Noh_2D/run.sh
index cff200801018e04ea560bd2c3fbd84057aec2d7c..36e2d7db554823c60bacbd2d907b9d06789a9fcd 100755
--- a/examples/Noh_2D/run.sh
+++ b/examples/HydroTests/Noh_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 2 noh.yml 2>&1 | tee output.log
+../../swift --hydro --threads=2 noh.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 12
diff --git a/examples/Noh_3D/getGlass.sh b/examples/HydroTests/Noh_3D/getGlass.sh
similarity index 100%
rename from examples/Noh_3D/getGlass.sh
rename to examples/HydroTests/Noh_3D/getGlass.sh
diff --git a/examples/Noh_3D/makeIC.py b/examples/HydroTests/Noh_3D/makeIC.py
similarity index 96%
rename from examples/Noh_3D/makeIC.py
rename to examples/HydroTests/Noh_3D/makeIC.py
index 0c25a5c8b3e967185cf16bae4b1f21c215266def..2d560a1e869c6c12e557c82402d6e8629ecf661c 100644
--- a/examples/Noh_3D/makeIC.py
+++ b/examples/HydroTests/Noh_3D/makeIC.py
@@ -75,10 +75,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Noh_3D/noh.yml b/examples/HydroTests/Noh_3D/noh.yml
similarity index 98%
rename from examples/Noh_3D/noh.yml
rename to examples/HydroTests/Noh_3D/noh.yml
index cc15af7ec190cd2c10cdff3a3ccb3f0beaf7e177..e005d394a6d3645ca33950af625b0267a62ca7d7 100644
--- a/examples/Noh_3D/noh.yml
+++ b/examples/HydroTests/Noh_3D/noh.yml
@@ -32,4 +32,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./noh.hdf5          # The file to read
+  periodic:   1
 
diff --git a/examples/Noh_3D/plotSolution.py b/examples/HydroTests/Noh_3D/plotSolution.py
similarity index 100%
rename from examples/Noh_3D/plotSolution.py
rename to examples/HydroTests/Noh_3D/plotSolution.py
diff --git a/examples/Noh_3D/run.sh b/examples/HydroTests/Noh_3D/run.sh
similarity index 85%
rename from examples/Noh_3D/run.sh
rename to examples/HydroTests/Noh_3D/run.sh
index b9e4fb145b2465433aa2bc0362aba19cc1267461..7845b5cfb592f0f8ac4c3951b48689623c06b21c 100755
--- a/examples/Noh_3D/run.sh
+++ b/examples/HydroTests/Noh_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 2 noh.yml 2>&1 | tee output.log
+../../swift --hydro --threads=2 noh.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 12
diff --git a/examples/PerturbedBox_2D/makeIC.py b/examples/HydroTests/PerturbedBox_2D/makeIC.py
similarity index 97%
rename from examples/PerturbedBox_2D/makeIC.py
rename to examples/HydroTests/PerturbedBox_2D/makeIC.py
index 87a41517772570870e04c79d3694c115a909e214..7f52525bdf508603a23f93c0fc7d8cda7f8f13cb 100644
--- a/examples/PerturbedBox_2D/makeIC.py
+++ b/examples/HydroTests/PerturbedBox_2D/makeIC.py
@@ -86,10 +86,6 @@ grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["NumPart_Total"] = numPart
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/PerturbedBox_2D/perturbedPlane.yml b/examples/HydroTests/PerturbedBox_2D/perturbedPlane.yml
similarity index 98%
rename from examples/PerturbedBox_2D/perturbedPlane.yml
rename to examples/HydroTests/PerturbedBox_2D/perturbedPlane.yml
index a0c6b6d9dbc7a677002dbce5abc6e5d268b56e97..4d03b30398bec34414636803caf6bf3bdc99251d 100644
--- a/examples/PerturbedBox_2D/perturbedPlane.yml
+++ b/examples/HydroTests/PerturbedBox_2D/perturbedPlane.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./perturbedPlane.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/PerturbedBox_3D/makeIC.py b/examples/HydroTests/PerturbedBox_3D/makeIC.py
similarity index 97%
rename from examples/PerturbedBox_3D/makeIC.py
rename to examples/HydroTests/PerturbedBox_3D/makeIC.py
index 1b0fc284e4c40b51fca45f117b92175a0ea45f31..f2d8357f2f96a4aa6efaa14822c442a884415b56 100644
--- a/examples/PerturbedBox_3D/makeIC.py
+++ b/examples/HydroTests/PerturbedBox_3D/makeIC.py
@@ -88,10 +88,6 @@ grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["NumPart_Total"] = numPart
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/PerturbedBox_3D/perturbedBox.yml b/examples/HydroTests/PerturbedBox_3D/perturbedBox.yml
similarity index 98%
rename from examples/PerturbedBox_3D/perturbedBox.yml
rename to examples/HydroTests/PerturbedBox_3D/perturbedBox.yml
index 3148510979d0e349c0d6242bf11e1a0db94f9e1f..6010cf457b2b67c0fce0332a0216aa9359673e3b 100644
--- a/examples/PerturbedBox_3D/perturbedBox.yml
+++ b/examples/HydroTests/PerturbedBox_3D/perturbedBox.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./perturbedBox.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/PerturbedBox_3D/run.sh b/examples/HydroTests/PerturbedBox_3D/run.sh
similarity index 74%
rename from examples/PerturbedBox_3D/run.sh
rename to examples/HydroTests/PerturbedBox_3D/run.sh
index e20bff52d18322ce377fb589900fd9e13eefe64d..463f6fecf16e13c76d713bba3ef4112ffbde509c 100755
--- a/examples/PerturbedBox_3D/run.sh
+++ b/examples/HydroTests/PerturbedBox_3D/run.sh
@@ -7,4 +7,4 @@ then
     python makeIC.py 50
 fi
 
-../swift -s -t 16 perturbedBox.yml 2>&1 | tee output.log
+../../swift --hydro --threads=16 perturbedBox.yml 2>&1 | tee output.log
diff --git a/examples/SedovBlast_1D/makeIC.py b/examples/HydroTests/SedovBlast_1D/makeIC.py
similarity index 96%
rename from examples/SedovBlast_1D/makeIC.py
rename to examples/HydroTests/SedovBlast_1D/makeIC.py
index 7177f3a7670aa054e3d7341a11a7359b3d855837..28b9c4bfd69395b94628bda3cfc3e59166460c79 100644
--- a/examples/SedovBlast_1D/makeIC.py
+++ b/examples/HydroTests/SedovBlast_1D/makeIC.py
@@ -72,10 +72,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SedovBlast_1D/plotSolution.py b/examples/HydroTests/SedovBlast_1D/plotSolution.py
similarity index 94%
rename from examples/SedovBlast_1D/plotSolution.py
rename to examples/HydroTests/SedovBlast_1D/plotSolution.py
index 2738b7c8f301a7351d962ac0f29faccd0a770fc9..c6d4a989da7493f7b500946610eea8832696bf6f 100644
--- a/examples/SedovBlast_1D/plotSolution.py
+++ b/examples/HydroTests/SedovBlast_1D/plotSolution.py
@@ -83,6 +83,12 @@ S = sim["/PartType0/Entropy"][:]
 P = sim["/PartType0/Pressure"][:]
 rho = sim["/PartType0/Density"][:]
 
+try:
+    alpha = sim["/PartType0/Viscosity"][:]
+    plot_alpha = True 
+except:
+    plot_alpha = False
+
 
 # Now, work our the solution....
 
@@ -246,14 +252,23 @@ ylabel("${\\rm{Internal~Energy}}~u$", labelpad=0)
 xlim(0, 1.3 * r_shock)
 ylim(-2, 22)
 
-# Entropy profile ---------------------------------
+# Entropy/alpha profile ---------------------------------
 subplot(235)
-plot(r, S, '.', color='r', ms=2.)
-plot(r_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+
+if plot_alpha:
+    plot(r, alpha, '.', color='r', ms=2.0)
+    plot([r_shock, r_shock], [-1, 2], "--", color="k", alpha=0.8, lw=1.2)
+    ylabel(r"${\rm{Viscosity}}~\alpha$", labelpad=0)
+    # Show location of shock
+    ylim(0, 2)
+else:
+    plot(r, S, '.', color='r', ms=2.0)
+    plot(r_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+    ylabel("${\\rm{Entropy}}~S$", labelpad=0)
+    ylim(-5, 50)
+
 xlabel("${\\rm{Radius}}~r$", labelpad=0)
-ylabel("${\\rm{Entropy}}~S$", labelpad=0)
 xlim(0, 1.3 * r_shock)
-ylim(-5, 50)
 
 # Information -------------------------------------
 subplot(236, frameon=False)
diff --git a/examples/SedovBlast_1D/run.sh b/examples/HydroTests/SedovBlast_1D/run.sh
similarity index 77%
rename from examples/SedovBlast_1D/run.sh
rename to examples/HydroTests/SedovBlast_1D/run.sh
index 4b9a84f069673bd6def3b96faec71b9d4fdd0dda..2888790eb1877541166c04002f9ae9539e9ef6d7 100755
--- a/examples/SedovBlast_1D/run.sh
+++ b/examples/HydroTests/SedovBlast_1D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 sedov.yml 2>&1 | tee output.log
+../../swift --hydro --limiter --threads=1 sedov.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 5
diff --git a/examples/SedovBlast_2D/sedov.yml b/examples/HydroTests/SedovBlast_1D/sedov.yml
similarity index 94%
rename from examples/SedovBlast_2D/sedov.yml
rename to examples/HydroTests/SedovBlast_1D/sedov.yml
index 098ca7a0d6264f016727709723aafdfb1224d460..b4252581d6eb3b2932a074e7545b2d308be51865 100644
--- a/examples/SedovBlast_2D/sedov.yml
+++ b/examples/HydroTests/SedovBlast_1D/sedov.yml
@@ -11,7 +11,7 @@ TimeIntegration:
   time_begin: 0.    # The starting time of the simulation (in internal units).
   time_end:   5e-2  # The end time of the simulation (in internal units).
   dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-4  # The maximal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
 
 # Parameters governing the snapshots
 Snapshots:
@@ -31,4 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sedov.hdf5          # The file to read
-
+  periodic:   1
diff --git a/examples/SedovBlast_2D/getGlass.sh b/examples/HydroTests/SedovBlast_2D/getGlass.sh
similarity index 100%
rename from examples/SedovBlast_2D/getGlass.sh
rename to examples/HydroTests/SedovBlast_2D/getGlass.sh
diff --git a/examples/SedovBlast_2D/makeIC.py b/examples/HydroTests/SedovBlast_2D/makeIC.py
similarity index 96%
rename from examples/SedovBlast_2D/makeIC.py
rename to examples/HydroTests/SedovBlast_2D/makeIC.py
index 0e83c7b19b9ac9bd69e20950a64e8a49dd8d0df9..cd1e433c104fd013a71c5a501c166194a7f3f50f 100644
--- a/examples/SedovBlast_2D/makeIC.py
+++ b/examples/HydroTests/SedovBlast_2D/makeIC.py
@@ -72,10 +72,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SedovBlast_2D/plotSolution.py b/examples/HydroTests/SedovBlast_2D/plotSolution.py
similarity index 100%
rename from examples/SedovBlast_2D/plotSolution.py
rename to examples/HydroTests/SedovBlast_2D/plotSolution.py
diff --git a/examples/SedovBlast_2D/run.sh b/examples/HydroTests/SedovBlast_2D/run.sh
similarity index 83%
rename from examples/SedovBlast_2D/run.sh
rename to examples/HydroTests/SedovBlast_2D/run.sh
index a32c8f0d6f3116d5486fe1bd086bf8df49d06020..0ad75f9d378712bd62cdd47c66240976cc57f04c 100755
--- a/examples/SedovBlast_2D/run.sh
+++ b/examples/HydroTests/SedovBlast_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 sedov.yml 2>&1 | tee output.log
+../../swift --hydro --limiter --threads=1 sedov.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 5
diff --git a/examples/SedovBlast_1D/sedov.yml b/examples/HydroTests/SedovBlast_2D/sedov.yml
similarity index 90%
rename from examples/SedovBlast_1D/sedov.yml
rename to examples/HydroTests/SedovBlast_2D/sedov.yml
index 5ef105b06c23ba577129f29a817c058457e7387f..b4252581d6eb3b2932a074e7545b2d308be51865 100644
--- a/examples/SedovBlast_1D/sedov.yml
+++ b/examples/HydroTests/SedovBlast_2D/sedov.yml
@@ -11,7 +11,7 @@ TimeIntegration:
   time_begin: 0.    # The starting time of the simulation (in internal units).
   time_end:   5e-2  # The end time of the simulation (in internal units).
   dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-5  # The maximal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
 
 # Parameters governing the snapshots
 Snapshots:
@@ -21,7 +21,7 @@ Snapshots:
 
 # Parameters governing the conserved quantities statistics
 Statistics:
-  delta_time:          1e-5 # Time between statistics output
+  delta_time:          1e-3 # Time between statistics output
 
 # Parameters for the hydrodynamics scheme
 SPH:
@@ -31,4 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sedov.hdf5          # The file to read
-
+  periodic:   1
diff --git a/examples/SedovBlast_3D/getGlass.sh b/examples/HydroTests/SedovBlast_3D/getGlass.sh
similarity index 100%
rename from examples/SedovBlast_3D/getGlass.sh
rename to examples/HydroTests/SedovBlast_3D/getGlass.sh
diff --git a/examples/SedovBlast_3D/makeIC.py b/examples/HydroTests/SedovBlast_3D/makeIC.py
similarity index 96%
rename from examples/SedovBlast_3D/makeIC.py
rename to examples/HydroTests/SedovBlast_3D/makeIC.py
index e1b743c6cdcd8dcc2f8da14d1d5589fb9ed111f0..30e0e31927db6343e58549bc9c7754bc274f51ce 100644
--- a/examples/SedovBlast_3D/makeIC.py
+++ b/examples/HydroTests/SedovBlast_3D/makeIC.py
@@ -72,10 +72,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SedovBlast_3D/plotSolution.py b/examples/HydroTests/SedovBlast_3D/plotSolution.py
similarity index 100%
rename from examples/SedovBlast_3D/plotSolution.py
rename to examples/HydroTests/SedovBlast_3D/plotSolution.py
diff --git a/examples/SedovBlast_3D/run.sh b/examples/HydroTests/SedovBlast_3D/run.sh
similarity index 83%
rename from examples/SedovBlast_3D/run.sh
rename to examples/HydroTests/SedovBlast_3D/run.sh
index 00d5e5b91c31e64f824a3d2a28c8e1a126684a74..62af72bacca62cd18fa77300d887b0bf0dd20789 100755
--- a/examples/SedovBlast_3D/run.sh
+++ b/examples/HydroTests/SedovBlast_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 sedov.yml 2>&1 | tee output.log
+../../swift --hydro --limiter --threads=4 sedov.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 5
diff --git a/examples/SedovBlast_3D/sedov.yml b/examples/HydroTests/SedovBlast_3D/sedov.yml
similarity index 93%
rename from examples/SedovBlast_3D/sedov.yml
rename to examples/HydroTests/SedovBlast_3D/sedov.yml
index 75849e33c0c644a18cd7357f901699d0d682c160..19e8c72538a748304ca4da076458c9ae27dc8f46 100644
--- a/examples/SedovBlast_3D/sedov.yml
+++ b/examples/HydroTests/SedovBlast_3D/sedov.yml
@@ -11,7 +11,7 @@ TimeIntegration:
   time_begin: 0.    # The starting time of the simulation (in internal units).
   time_end:   5e-2  # The end time of the simulation (in internal units).
   dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-4  # The maximal time-step size of the simulation (in internal units).
+  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
 
 # Parameters governing the snapshots
 Snapshots:
@@ -32,5 +32,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:                    ./sedov.hdf5          
+  periodic:                     1
   smoothing_length_scaling:     3.33
- 
diff --git a/examples/SineWavePotential_1D/makeIC.py b/examples/HydroTests/SineWavePotential_1D/makeIC.py
similarity index 97%
rename from examples/SineWavePotential_1D/makeIC.py
rename to examples/HydroTests/SineWavePotential_1D/makeIC.py
index afbf1bc0fa47a27677cb9c5645d439432bd9fd9a..39a78393650c7a8c0c01814fa10f514cc277e685 100644
--- a/examples/SineWavePotential_1D/makeIC.py
+++ b/examples/HydroTests/SineWavePotential_1D/makeIC.py
@@ -74,10 +74,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SineWavePotential_1D/plotSolution.py b/examples/HydroTests/SineWavePotential_1D/plotSolution.py
similarity index 100%
rename from examples/SineWavePotential_1D/plotSolution.py
rename to examples/HydroTests/SineWavePotential_1D/plotSolution.py
diff --git a/examples/SineWavePotential_1D/run.sh b/examples/HydroTests/SineWavePotential_1D/run.sh
similarity index 70%
rename from examples/SineWavePotential_1D/run.sh
rename to examples/HydroTests/SineWavePotential_1D/run.sh
index 077cf1c0cc64ef7a85cfd0e67f8f490b0de4ba37..920bd413a71a99e043cc7d31be088e0037c6c7c1 100755
--- a/examples/SineWavePotential_1D/run.sh
+++ b/examples/HydroTests/SineWavePotential_1D/run.sh
@@ -6,7 +6,7 @@ then
   python makeIC.py
 fi
 
-../swift -g -s -t 2 sineWavePotential.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --threads=2 sineWavePotential.yml 2>&1 | tee output.log
 
 for f in sineWavePotential_*.hdf5
 do
diff --git a/examples/SineWavePotential_1D/sineWavePotential.yml b/examples/HydroTests/SineWavePotential_1D/sineWavePotential.yml
similarity index 98%
rename from examples/SineWavePotential_1D/sineWavePotential.yml
rename to examples/HydroTests/SineWavePotential_1D/sineWavePotential.yml
index e6285785099f10902ea60b21334a0ad26c0593de..a21a0b5936ab0a62a7b1f29c56145bed79ba73c4 100644
--- a/examples/SineWavePotential_1D/sineWavePotential.yml
+++ b/examples/HydroTests/SineWavePotential_1D/sineWavePotential.yml
@@ -31,7 +31,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  sineWavePotential.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 SineWavePotential:
   amplitude: 10.
diff --git a/examples/SineWavePotential_2D/makeIC.py b/examples/HydroTests/SineWavePotential_2D/makeIC.py
similarity index 96%
rename from examples/SineWavePotential_2D/makeIC.py
rename to examples/HydroTests/SineWavePotential_2D/makeIC.py
index 62ae89f8f52bff9c0db37cd537f286ab817da3fe..057760502e561b5ec5d98e716b79119e3637ef57 100644
--- a/examples/SineWavePotential_2D/makeIC.py
+++ b/examples/HydroTests/SineWavePotential_2D/makeIC.py
@@ -70,10 +70,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SineWavePotential_2D/plotSolution.py b/examples/HydroTests/SineWavePotential_2D/plotSolution.py
similarity index 100%
rename from examples/SineWavePotential_2D/plotSolution.py
rename to examples/HydroTests/SineWavePotential_2D/plotSolution.py
diff --git a/examples/SineWavePotential_2D/run.sh b/examples/HydroTests/SineWavePotential_2D/run.sh
similarity index 70%
rename from examples/SineWavePotential_2D/run.sh
rename to examples/HydroTests/SineWavePotential_2D/run.sh
index 077cf1c0cc64ef7a85cfd0e67f8f490b0de4ba37..920bd413a71a99e043cc7d31be088e0037c6c7c1 100755
--- a/examples/SineWavePotential_2D/run.sh
+++ b/examples/HydroTests/SineWavePotential_2D/run.sh
@@ -6,7 +6,7 @@ then
   python makeIC.py
 fi
 
-../swift -g -s -t 2 sineWavePotential.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --threads=2 sineWavePotential.yml 2>&1 | tee output.log
 
 for f in sineWavePotential_*.hdf5
 do
diff --git a/examples/SineWavePotential_2D/sineWavePotential.yml b/examples/HydroTests/SineWavePotential_2D/sineWavePotential.yml
similarity index 98%
rename from examples/SineWavePotential_2D/sineWavePotential.yml
rename to examples/HydroTests/SineWavePotential_2D/sineWavePotential.yml
index 9107652f65c343d68fc92e699d45710265d65308..63d575e7e2486cf4428bb8b11e1ba16da6e08d99 100644
--- a/examples/SineWavePotential_2D/sineWavePotential.yml
+++ b/examples/HydroTests/SineWavePotential_2D/sineWavePotential.yml
@@ -31,7 +31,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  sineWavePotential.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 SineWavePotential:
   amplitude: 10.
diff --git a/examples/SineWavePotential_3D/makeIC.py b/examples/HydroTests/SineWavePotential_3D/makeIC.py
similarity index 97%
rename from examples/SineWavePotential_3D/makeIC.py
rename to examples/HydroTests/SineWavePotential_3D/makeIC.py
index 4833ec1b055e27b63751136f0491e972fb9e492a..a4f39238ba40bf6769e0fb44fe8da706730fe45b 100644
--- a/examples/SineWavePotential_3D/makeIC.py
+++ b/examples/HydroTests/SineWavePotential_3D/makeIC.py
@@ -81,10 +81,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SineWavePotential_3D/plotSolution.py b/examples/HydroTests/SineWavePotential_3D/plotSolution.py
similarity index 100%
rename from examples/SineWavePotential_3D/plotSolution.py
rename to examples/HydroTests/SineWavePotential_3D/plotSolution.py
diff --git a/examples/SineWavePotential_3D/run.sh b/examples/HydroTests/SineWavePotential_3D/run.sh
similarity index 70%
rename from examples/SineWavePotential_3D/run.sh
rename to examples/HydroTests/SineWavePotential_3D/run.sh
index 077cf1c0cc64ef7a85cfd0e67f8f490b0de4ba37..920bd413a71a99e043cc7d31be088e0037c6c7c1 100755
--- a/examples/SineWavePotential_3D/run.sh
+++ b/examples/HydroTests/SineWavePotential_3D/run.sh
@@ -6,7 +6,7 @@ then
   python makeIC.py
 fi
 
-../swift -g -s -t 2 sineWavePotential.yml 2>&1 | tee output.log
+../../swift --external-gravity --hydro --threads=2 sineWavePotential.yml 2>&1 | tee output.log
 
 for f in sineWavePotential_*.hdf5
 do
diff --git a/examples/SineWavePotential_3D/sineWavePotential.yml b/examples/HydroTests/SineWavePotential_3D/sineWavePotential.yml
similarity index 98%
rename from examples/SineWavePotential_3D/sineWavePotential.yml
rename to examples/HydroTests/SineWavePotential_3D/sineWavePotential.yml
index 8a49d8bc40eb662d62b2b6550b70fe380a7564f5..5b91feae0ecf8ad2f4f257374900a01f031acff1 100644
--- a/examples/SineWavePotential_3D/sineWavePotential.yml
+++ b/examples/HydroTests/SineWavePotential_3D/sineWavePotential.yml
@@ -31,7 +31,8 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  sineWavePotential.hdf5       # The file to read
- 
+  periodic:   1
+  
 # External potential parameters
 SineWavePotential:
   amplitude: 10.
diff --git a/examples/SodShock_2D/getGlass.sh b/examples/HydroTests/SodShockSpherical_2D/getGlass.sh
similarity index 100%
rename from examples/SodShock_2D/getGlass.sh
rename to examples/HydroTests/SodShockSpherical_2D/getGlass.sh
diff --git a/examples/SodShockSpherical_2D/getReference.sh b/examples/HydroTests/SodShockSpherical_2D/getReference.sh
similarity index 100%
rename from examples/SodShockSpherical_2D/getReference.sh
rename to examples/HydroTests/SodShockSpherical_2D/getReference.sh
diff --git a/examples/SodShockSpherical_2D/makeIC.py b/examples/HydroTests/SodShockSpherical_2D/makeIC.py
similarity index 97%
rename from examples/SodShockSpherical_2D/makeIC.py
rename to examples/HydroTests/SodShockSpherical_2D/makeIC.py
index ac9f6e193769d7466f5b8e41a408da2350777be6..bc2c7ed1dcae5adfbfdcaf01c6b5a36bf5669e9e 100644
--- a/examples/SodShockSpherical_2D/makeIC.py
+++ b/examples/HydroTests/SodShockSpherical_2D/makeIC.py
@@ -100,10 +100,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SodShockSpherical_2D/plotSolution.py b/examples/HydroTests/SodShockSpherical_2D/plotSolution.py
similarity index 100%
rename from examples/SodShockSpherical_2D/plotSolution.py
rename to examples/HydroTests/SodShockSpherical_2D/plotSolution.py
diff --git a/examples/SodShockSpherical_2D/run.sh b/examples/HydroTests/SodShockSpherical_2D/run.sh
similarity index 89%
rename from examples/SodShockSpherical_2D/run.sh
rename to examples/HydroTests/SodShockSpherical_2D/run.sh
index d662d20f40ef9e221285d5820e867607804e9dbe..609f2e0ae065a1fa76ee7bcfa90efa9cb1aa020a 100755
--- a/examples/SodShockSpherical_2D/run.sh
+++ b/examples/HydroTests/SodShockSpherical_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 sodShock.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 sodShock.yml 2>&1 | tee output.log
 
 # Get the high resolution 1D reference solution if not present.
 if [ ! -e sodShockSpherical2D_exact.txt ]
diff --git a/examples/SodShockSpherical_2D/sodShock.yml b/examples/HydroTests/SodShockSpherical_2D/sodShock.yml
similarity index 98%
rename from examples/SodShockSpherical_2D/sodShock.yml
rename to examples/HydroTests/SodShockSpherical_2D/sodShock.yml
index a26ab95b21c782ce83310038432ac08df0e024c3..4ef13c26ccf55163f9276b6e095c351efd9ecb35 100644
--- a/examples/SodShockSpherical_2D/sodShock.yml
+++ b/examples/HydroTests/SodShockSpherical_2D/sodShock.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/SodShock_3D/getGlass.sh b/examples/HydroTests/SodShockSpherical_3D/getGlass.sh
similarity index 100%
rename from examples/SodShock_3D/getGlass.sh
rename to examples/HydroTests/SodShockSpherical_3D/getGlass.sh
diff --git a/examples/SodShockSpherical_3D/getReference.sh b/examples/HydroTests/SodShockSpherical_3D/getReference.sh
similarity index 100%
rename from examples/SodShockSpherical_3D/getReference.sh
rename to examples/HydroTests/SodShockSpherical_3D/getReference.sh
diff --git a/examples/SodShockSpherical_3D/makeIC.py b/examples/HydroTests/SodShockSpherical_3D/makeIC.py
similarity index 97%
rename from examples/SodShockSpherical_3D/makeIC.py
rename to examples/HydroTests/SodShockSpherical_3D/makeIC.py
index be8f9b61a1beef00f49786860ce94287b30e2ab3..3884fc29280209d465b721230ae19b474a42f6a0 100644
--- a/examples/SodShockSpherical_3D/makeIC.py
+++ b/examples/HydroTests/SodShockSpherical_3D/makeIC.py
@@ -102,10 +102,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SodShockSpherical_3D/plotSolution.py b/examples/HydroTests/SodShockSpherical_3D/plotSolution.py
similarity index 100%
rename from examples/SodShockSpherical_3D/plotSolution.py
rename to examples/HydroTests/SodShockSpherical_3D/plotSolution.py
diff --git a/examples/SodShockSpherical_3D/run.sh b/examples/HydroTests/SodShockSpherical_3D/run.sh
similarity index 89%
rename from examples/SodShockSpherical_3D/run.sh
rename to examples/HydroTests/SodShockSpherical_3D/run.sh
index faf979869e175172ce31db6ac5021daf1758f3b0..c511dbcc6b18248bcfd33a9e0216cd22cf26aead 100755
--- a/examples/SodShockSpherical_3D/run.sh
+++ b/examples/HydroTests/SodShockSpherical_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 sodShock.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 sodShock.yml 2>&1 | tee output.log
 
 # Get the high resolution 1D reference solution if not present.
 if [ ! -e sodShockSpherical3D_exact.txt ]
diff --git a/examples/SodShockSpherical_3D/sodShock.yml b/examples/HydroTests/SodShockSpherical_3D/sodShock.yml
similarity index 98%
rename from examples/SodShockSpherical_3D/sodShock.yml
rename to examples/HydroTests/SodShockSpherical_3D/sodShock.yml
index 3fc4a1fb2b8cc5f6a603abf4c87ac99c7647b9bd..16d3bd313cf8a365fb82d3142ba1ac4fd065d193 100644
--- a/examples/SodShockSpherical_3D/sodShock.yml
+++ b/examples/HydroTests/SodShockSpherical_3D/sodShock.yml
@@ -32,4 +32,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/SodShock_1D/makeIC.py b/examples/HydroTests/SodShock_1D/makeIC.py
similarity index 97%
rename from examples/SodShock_1D/makeIC.py
rename to examples/HydroTests/SodShock_1D/makeIC.py
index a5c7f03b24d10e81057dbe25855f33f795218f19..d26bbbb4dbf71c1d6a63ad3c7900edfabe0fb9ec 100644
--- a/examples/SodShock_1D/makeIC.py
+++ b/examples/HydroTests/SodShock_1D/makeIC.py
@@ -92,10 +92,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SodShock_1D/plotSolution.py b/examples/HydroTests/SodShock_1D/plotSolution.py
similarity index 86%
rename from examples/SodShock_1D/plotSolution.py
rename to examples/HydroTests/SodShock_1D/plotSolution.py
index e001a8d87a03cb246be63ab10d245f95eb1a7ce7..12ae8a9cf26fb715281ed00bf565c5c8d9a234fb 100644
--- a/examples/SodShock_1D/plotSolution.py
+++ b/examples/HydroTests/SodShock_1D/plotSolution.py
@@ -70,11 +70,11 @@ snap = int(sys.argv[1])
 sim = h5py.File("sodShock_%04d.hdf5"%snap, "r")
 boxSize = sim["/Header"].attrs["BoxSize"][0]
 time = sim["/Header"].attrs["Time"][0]
-scheme = sim["/HydroScheme"].attrs["Scheme"]
-kernel = sim["/HydroScheme"].attrs["Kernel function"]
+scheme = str(sim["/HydroScheme"].attrs["Scheme"])
+kernel = str(sim["/HydroScheme"].attrs["Kernel function"])
 neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"]
 eta = sim["/HydroScheme"].attrs["Kernel eta"]
-git = sim["Code"].attrs["Git Revision"]
+git = str(sim["Code"].attrs["Git Revision"])
 
 x = sim["/PartType0/Coordinates"][:,0]
 v = sim["/PartType0/Velocities"][:,0]
@@ -82,6 +82,16 @@ u = sim["/PartType0/InternalEnergy"][:]
 S = sim["/PartType0/Entropy"][:]
 P = sim["/PartType0/Pressure"][:]
 rho = sim["/PartType0/Density"][:]
+try:
+    alpha = sim["/PartType0/Viscosity"][:]
+    plot_alpha = True 
+except:
+    plot_alpha = False
+try:
+    alpha_diff = sim["PartType0/Diffusion"][:]
+    plot_alpha_diff = True
+except:
+    plot_alpha_diff = False
 
 N = 1000  # Number of points
 x_min = -1.
@@ -234,12 +244,20 @@ ylim(-0.1, 0.95)
 
 # Density profile --------------------------------
 subplot(232)
-plot(x, rho, '.', color='r', ms=4.0)
-plot(x_s, rho_s, '--', color='k', alpha=0.8, lw=1.2)
+if plot_alpha_diff:
+    plot(x, alpha_diff, '.', color='r', ms=4.0)
+    ylabel(r"${\rm{Diffusion}}~\alpha$", labelpad=0)
+    # Show location of contact discontinuity
+    plot([x_34, x_34], [-100, 100], color="k", alpha=0.5, ls="dashed", lw=1.2)
+    ylim(0, 1)
+else:
+    plot(x, rho, '.', color='r', ms=4.0)
+    plot(x_s, rho_s, '--', color='k', alpha=0.8, lw=1.2)
+    ylabel("${\\rm{Density}}~\\rho$", labelpad=0)
+    ylim(0.05, 1.1)
+
 xlabel("${\\rm{Position}}~x$", labelpad=0)
-ylabel("${\\rm{Density}}~\\rho$", labelpad=0)
 xlim(-0.5, 0.5)
-ylim(0.05, 1.1)
 
 # Pressure profile --------------------------------
 subplot(233)
@@ -252,21 +270,30 @@ ylim(0.01, 1.1)
 
 # Internal energy profile -------------------------
 subplot(234)
-plot(x, u, '.', color='r', ms=4.0)
+scatter(x, u, marker='.', c=alpha_diff, s=4.0)
 plot(x_s, u_s, '--', color='k', alpha=0.8, lw=1.2)
 xlabel("${\\rm{Position}}~x$", labelpad=0)
 ylabel("${\\rm{Internal~Energy}}~u$", labelpad=0)
 xlim(-0.5, 0.5)
 ylim(0.8, 2.2)
 
-# Entropy profile ---------------------------------
+# Entropy/alpha profile ---------------------------------
 subplot(235)
-plot(x, S, '.', color='r', ms=4.0)
-plot(x_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+
+if plot_alpha:
+    plot(x, alpha, '.', color='r', ms=4.0)
+    ylabel(r"${\rm{Viscosity}}~\alpha$", labelpad=0)
+    # Show location of shock
+    plot([x_56, x_56], [-100, 100], color="k", alpha=0.5, ls="dashed", lw=1.2)
+    ylim(0, 1)
+else:
+    plot(x, S, '.', color='r', ms=4.0)
+    plot(x_s, s_s, '--', color='k', alpha=0.8, lw=1.2)
+    ylabel("${\\rm{Entropy}}~S$", labelpad=0)
+    ylim(0.8, 3.8)
+
 xlabel("${\\rm{Position}}~x$", labelpad=0)
-ylabel("${\\rm{Entropy}}~S$", labelpad=0)
 xlim(-0.5, 0.5)
-ylim(0.8, 3.8)
 
 # Information -------------------------------------
 subplot(236, frameon=False)
@@ -284,5 +311,6 @@ ylim(0, 1)
 xticks([])
 yticks([])
 
+tight_layout()
 
 savefig("SodShock.png", dpi=200)
diff --git a/examples/SodShock_1D/run.sh b/examples/HydroTests/SodShock_1D/run.sh
similarity index 79%
rename from examples/SodShock_1D/run.sh
rename to examples/HydroTests/SodShock_1D/run.sh
index 4be4254baa4a87b105a5f3c1bfbf9059348a1e9e..8be97f7b34c9947268ed44b44b2c445ddb8a717f 100755
--- a/examples/SodShock_1D/run.sh
+++ b/examples/HydroTests/SodShock_1D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 sodShock.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 sodShock.yml 2>&1 | tee output.log
 
 # Plot the result
 python plotSolution.py 1 
diff --git a/examples/SodShock_1D/sodShock.yml b/examples/HydroTests/SodShock_1D/sodShock.yml
similarity index 91%
rename from examples/SodShock_1D/sodShock.yml
rename to examples/HydroTests/SodShock_1D/sodShock.yml
index e827edadb9c287975d83214249d4fdd7734a5f6c..b936f50f6c2c7d9078c49fbad868aa5334498957 100644
--- a/examples/SodShock_1D/sodShock.yml
+++ b/examples/HydroTests/SodShock_1D/sodShock.yml
@@ -27,8 +27,12 @@ Statistics:
 SPH:
   resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  viscosity_alpha_min:   0.01
+  viscosity_alpha:       0.01
+  viscosity_alpha_max:   2.0
+  viscosity_length:      0.02
 
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sodShock.hdf5       # The file to read
-
+  periodic:   1
diff --git a/examples/HydroTests/SodShock_2D/getGlass.sh b/examples/HydroTests/SodShock_2D/getGlass.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f4cb4ebcb4b452b2b123462bc97eed532f43ba25
--- /dev/null
+++ b/examples/HydroTests/SodShock_2D/getGlass.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassPlane_128.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassPlane_48.hdf5
diff --git a/examples/SodShock_2D/makeIC.py b/examples/HydroTests/SodShock_2D/makeIC.py
similarity index 97%
rename from examples/SodShock_2D/makeIC.py
rename to examples/HydroTests/SodShock_2D/makeIC.py
index 850ca24f54c39990a9b0c54c0d2f361a2aa01e95..2d3bd75fcc41e0fee6dd7cfde62873980bbc7143 100644
--- a/examples/SodShock_2D/makeIC.py
+++ b/examples/HydroTests/SodShock_2D/makeIC.py
@@ -98,10 +98,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SodShock_2D/plotSolution.py b/examples/HydroTests/SodShock_2D/plotSolution.py
similarity index 100%
rename from examples/SodShock_2D/plotSolution.py
rename to examples/HydroTests/SodShock_2D/plotSolution.py
diff --git a/examples/SodShock_2D/run.sh b/examples/HydroTests/SodShock_2D/run.sh
similarity index 84%
rename from examples/SodShock_2D/run.sh
rename to examples/HydroTests/SodShock_2D/run.sh
index 9e6bbfdf1c0a7c206ce6966fdca7b20a28047dd8..a11c6291a48447b2f64aef458c01036e4ed73441 100755
--- a/examples/SodShock_2D/run.sh
+++ b/examples/HydroTests/SodShock_2D/run.sh
@@ -13,6 +13,6 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 sodShock.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 sodShock.yml 2>&1 | tee output.log
 
 python plotSolution.py 1
diff --git a/examples/SodShock_2D/sodShock.yml b/examples/HydroTests/SodShock_2D/sodShock.yml
similarity index 98%
rename from examples/SodShock_2D/sodShock.yml
rename to examples/HydroTests/SodShock_2D/sodShock.yml
index 51a188b6d4537d490cb837a03dab15f74c3b083c..b831dd78278fea619d75e2db8806cf00d8faf575 100644
--- a/examples/SodShock_2D/sodShock.yml
+++ b/examples/HydroTests/SodShock_2D/sodShock.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/HydroTests/SodShock_3D/getGlass.sh b/examples/HydroTests/SodShock_3D/getGlass.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f61b61d4e6c51b44576fd7cdd6242cb9f0133039
--- /dev/null
+++ b/examples/HydroTests/SodShock_3D/getGlass.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassCube_64.hdf5
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassCube_32.hdf5
diff --git a/examples/SodShock_3D/makeIC.py b/examples/HydroTests/SodShock_3D/makeIC.py
similarity index 97%
rename from examples/SodShock_3D/makeIC.py
rename to examples/HydroTests/SodShock_3D/makeIC.py
index c71c07c6c97bb715c580f747cf8d39ddf08445c3..69f1bc506680d3f2f149c0fd7b75b069f9b00b64 100644
--- a/examples/SodShock_3D/makeIC.py
+++ b/examples/HydroTests/SodShock_3D/makeIC.py
@@ -98,10 +98,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SodShock_3D/plotSolution.py b/examples/HydroTests/SodShock_3D/plotSolution.py
similarity index 100%
rename from examples/SodShock_3D/plotSolution.py
rename to examples/HydroTests/SodShock_3D/plotSolution.py
diff --git a/examples/SodShock_3D/run.sh b/examples/HydroTests/SodShock_3D/run.sh
similarity index 84%
rename from examples/SodShock_3D/run.sh
rename to examples/HydroTests/SodShock_3D/run.sh
index 8ed85baf73425b75f402c491a3c66785f6c6fce0..aceeacd331e9d2e467e1cf42079dcb492ad0c631 100755
--- a/examples/SodShock_3D/run.sh
+++ b/examples/HydroTests/SodShock_3D/run.sh
@@ -13,6 +13,6 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 sodShock.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 sodShock.yml 2>&1 | tee output.log
 
 python plotSolution.py 1
diff --git a/examples/SodShock_3D/sodShock.yml b/examples/HydroTests/SodShock_3D/sodShock.yml
similarity index 98%
rename from examples/SodShock_3D/sodShock.yml
rename to examples/HydroTests/SodShock_3D/sodShock.yml
index 6042c8090d00fef5467a7fed3d6f5a104c626f43..b2d783cd74d66a8eaa3cbbf4b08fc686b0298244 100644
--- a/examples/SodShock_3D/sodShock.yml
+++ b/examples/HydroTests/SodShock_3D/sodShock.yml
@@ -32,4 +32,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./sodShock.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/SquareTest_2D/makeIC.py b/examples/HydroTests/SquareTest_2D/makeIC.py
similarity index 97%
rename from examples/SquareTest_2D/makeIC.py
rename to examples/HydroTests/SquareTest_2D/makeIC.py
index 186e653124a6ff62a964c37cf0fb2220f1152a0e..12a394873edf42f7ecfdf07c9795b62e3ad89745 100644
--- a/examples/SquareTest_2D/makeIC.py
+++ b/examples/HydroTests/SquareTest_2D/makeIC.py
@@ -96,10 +96,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = fileOutput.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = fileOutput.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SquareTest_2D/plotSolution.py b/examples/HydroTests/SquareTest_2D/plotSolution.py
similarity index 100%
rename from examples/SquareTest_2D/plotSolution.py
rename to examples/HydroTests/SquareTest_2D/plotSolution.py
diff --git a/examples/SquareTest_2D/run.sh b/examples/HydroTests/SquareTest_2D/run.sh
similarity index 79%
rename from examples/SquareTest_2D/run.sh
rename to examples/HydroTests/SquareTest_2D/run.sh
index 7d77e9c5bd89732970b47feb3a297ef92b345a01..dae0d742e706d73f3a5efc26a9d82ac59c883757 100755
--- a/examples/SquareTest_2D/run.sh
+++ b/examples/HydroTests/SquareTest_2D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 square.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 square.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 5
diff --git a/examples/SquareTest_2D/square.yml b/examples/HydroTests/SquareTest_2D/square.yml
similarity index 98%
rename from examples/SquareTest_2D/square.yml
rename to examples/HydroTests/SquareTest_2D/square.yml
index b700c441a619ef8faac52656909567c944e344c3..54e0effa676cd5b1233ae7c38aded18d089f0ef2 100644
--- a/examples/SquareTest_2D/square.yml
+++ b/examples/HydroTests/SquareTest_2D/square.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./square.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/UniformBox_2D/makeIC.py b/examples/HydroTests/UniformBox_2D/makeIC.py
similarity index 97%
rename from examples/UniformBox_2D/makeIC.py
rename to examples/HydroTests/UniformBox_2D/makeIC.py
index 642896c6ec406a5a75127e024d19775ea4a8e09b..36bb1ba6118a31db3251a1cd7f332f01b2ba3df1 100644
--- a/examples/UniformBox_2D/makeIC.py
+++ b/examples/HydroTests/UniformBox_2D/makeIC.py
@@ -85,10 +85,6 @@ grp.attrs["Flag_Entropy_ICs"] = [0, 0, 0, 0, 0, 0]
 grp.attrs["NumPart_Total"] = numPart
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/UniformBox_2D/run.sh b/examples/HydroTests/UniformBox_2D/run.sh
similarity index 74%
rename from examples/UniformBox_2D/run.sh
rename to examples/HydroTests/UniformBox_2D/run.sh
index ee3ef109968a65e2437ea17b42013266195d3314..aad344187dfac468c58baa7b32e45024c8ef49b1 100755
--- a/examples/UniformBox_2D/run.sh
+++ b/examples/HydroTests/UniformBox_2D/run.sh
@@ -7,4 +7,4 @@ then
     python makeIC.py 100
 fi
 
-../swift -s -t 16 uniformPlane.yml 2>&1 | tee output.log
+../../swift --hydro --threads=16 uniformPlane.yml 2>&1 | tee output.log
diff --git a/examples/UniformBox_2D/uniformPlane.yml b/examples/HydroTests/UniformBox_2D/uniformPlane.yml
similarity index 98%
rename from examples/UniformBox_2D/uniformPlane.yml
rename to examples/HydroTests/UniformBox_2D/uniformPlane.yml
index 58fe0d50557db0c0624fe89cbde888d2c92775e5..77f53d59c497b10b1c95ce5dcb763fa8bffcd5ca 100644
--- a/examples/UniformBox_2D/uniformPlane.yml
+++ b/examples/HydroTests/UniformBox_2D/uniformPlane.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./uniformPlane.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/UniformBox_3D/makeIC.py b/examples/HydroTests/UniformBox_3D/makeIC.py
similarity index 97%
rename from examples/UniformBox_3D/makeIC.py
rename to examples/HydroTests/UniformBox_3D/makeIC.py
index 01e37c67b6e2eec2984d62f4ffd503b23b5bd9ec..8311aae951f921b4c7f759ba09cc8fe73cf4a9f1 100644
--- a/examples/UniformBox_3D/makeIC.py
+++ b/examples/HydroTests/UniformBox_3D/makeIC.py
@@ -57,10 +57,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = periodic
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/UniformBox_3D/makeICbig.py b/examples/HydroTests/UniformBox_3D/makeICbig.py
similarity index 100%
rename from examples/UniformBox_3D/makeICbig.py
rename to examples/HydroTests/UniformBox_3D/makeICbig.py
diff --git a/examples/UniformBox_3D/run.sh b/examples/HydroTests/UniformBox_3D/run.sh
similarity index 74%
rename from examples/UniformBox_3D/run.sh
rename to examples/HydroTests/UniformBox_3D/run.sh
index 08891cdd08fccf8f43089951e94dddb33e162030..f93c53a14c5441a7df8936f3507ce9cbe8c2294c 100755
--- a/examples/UniformBox_3D/run.sh
+++ b/examples/HydroTests/UniformBox_3D/run.sh
@@ -7,4 +7,4 @@ then
     python makeIC.py 100
 fi
 
-../swift -s -t 16 uniformBox.yml 2>&1 | tee output.log
+../../swift --hydro --threads=16 uniformBox.yml 2>&1 | tee output.log
diff --git a/examples/UniformBox_3D/uniformBox.yml b/examples/HydroTests/UniformBox_3D/uniformBox.yml
similarity index 98%
rename from examples/UniformBox_3D/uniformBox.yml
rename to examples/HydroTests/UniformBox_3D/uniformBox.yml
index 17dd5632edd345802402cb9c6d1dcf184e909806..202ff8298fe763a8c194ab4570b1252fe352dccc 100644
--- a/examples/UniformBox_3D/uniformBox.yml
+++ b/examples/HydroTests/UniformBox_3D/uniformBox.yml
@@ -31,3 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./uniformBox.hdf5     # The file to read
+  periodic:   1
diff --git a/examples/VacuumSpherical_2D/getGlass.sh b/examples/HydroTests/VacuumSpherical_2D/getGlass.sh
similarity index 100%
rename from examples/VacuumSpherical_2D/getGlass.sh
rename to examples/HydroTests/VacuumSpherical_2D/getGlass.sh
diff --git a/examples/VacuumSpherical_2D/getReference.sh b/examples/HydroTests/VacuumSpherical_2D/getReference.sh
similarity index 100%
rename from examples/VacuumSpherical_2D/getReference.sh
rename to examples/HydroTests/VacuumSpherical_2D/getReference.sh
diff --git a/examples/VacuumSpherical_2D/makeIC.py b/examples/HydroTests/VacuumSpherical_2D/makeIC.py
similarity index 96%
rename from examples/VacuumSpherical_2D/makeIC.py
rename to examples/HydroTests/VacuumSpherical_2D/makeIC.py
index 498f1b5bc5277188d8ff8d34a5ec24cd314332d4..05f0d8414cfa88755ecceb2be757e24ca3cefdde 100644
--- a/examples/VacuumSpherical_2D/makeIC.py
+++ b/examples/HydroTests/VacuumSpherical_2D/makeIC.py
@@ -77,10 +77,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/VacuumSpherical_2D/plotSolution.py b/examples/HydroTests/VacuumSpherical_2D/plotSolution.py
similarity index 100%
rename from examples/VacuumSpherical_2D/plotSolution.py
rename to examples/HydroTests/VacuumSpherical_2D/plotSolution.py
diff --git a/examples/VacuumSpherical_2D/run.sh b/examples/HydroTests/VacuumSpherical_2D/run.sh
similarity index 90%
rename from examples/VacuumSpherical_2D/run.sh
rename to examples/HydroTests/VacuumSpherical_2D/run.sh
index 51d32b4de679877741b7ecd74238fecb785579e7..54c5efdf9623cd11e504753514052f05ad1b36eb 100755
--- a/examples/VacuumSpherical_2D/run.sh
+++ b/examples/HydroTests/VacuumSpherical_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 vacuum.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 vacuum.yml 2>&1 | tee output.log
 
 # Get the 1D high resolution reference result if not present.
 if [ ! -e vacuumSpherical2D_exact.txt ]
diff --git a/examples/VacuumSpherical_2D/vacuum.yml b/examples/HydroTests/VacuumSpherical_2D/vacuum.yml
similarity index 98%
rename from examples/VacuumSpherical_2D/vacuum.yml
rename to examples/HydroTests/VacuumSpherical_2D/vacuum.yml
index 881b155b62c7f1f2af12a1d013ff5c05f1c16a88..1d5642d5c1b645808229c5c6b99fb6d319351880 100644
--- a/examples/VacuumSpherical_2D/vacuum.yml
+++ b/examples/HydroTests/VacuumSpherical_2D/vacuum.yml
@@ -31,4 +31,4 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./vacuum.hdf5       # The file to read
-
+  periodic:   1
diff --git a/examples/VacuumSpherical_3D/getGlass.sh b/examples/HydroTests/VacuumSpherical_3D/getGlass.sh
old mode 100755
new mode 100644
similarity index 100%
rename from examples/VacuumSpherical_3D/getGlass.sh
rename to examples/HydroTests/VacuumSpherical_3D/getGlass.sh
diff --git a/examples/VacuumSpherical_3D/getReference.sh b/examples/HydroTests/VacuumSpherical_3D/getReference.sh
similarity index 100%
rename from examples/VacuumSpherical_3D/getReference.sh
rename to examples/HydroTests/VacuumSpherical_3D/getReference.sh
diff --git a/examples/VacuumSpherical_3D/makeIC.py b/examples/HydroTests/VacuumSpherical_3D/makeIC.py
similarity index 97%
rename from examples/VacuumSpherical_3D/makeIC.py
rename to examples/HydroTests/VacuumSpherical_3D/makeIC.py
index d67a30707a904268a09641210a6a3bfcbf305dad..dd4ddd7e8a8d6335e4d3d3b383c54bf301a06f1d 100644
--- a/examples/VacuumSpherical_3D/makeIC.py
+++ b/examples/HydroTests/VacuumSpherical_3D/makeIC.py
@@ -80,10 +80,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/VacuumSpherical_3D/plotSolution.py b/examples/HydroTests/VacuumSpherical_3D/plotSolution.py
similarity index 100%
rename from examples/VacuumSpherical_3D/plotSolution.py
rename to examples/HydroTests/VacuumSpherical_3D/plotSolution.py
diff --git a/examples/VacuumSpherical_3D/run.sh b/examples/HydroTests/VacuumSpherical_3D/run.sh
similarity index 89%
rename from examples/VacuumSpherical_3D/run.sh
rename to examples/HydroTests/VacuumSpherical_3D/run.sh
index a136929678f745f6a3d0859ba146e1bc1c6c43d0..32a55a7478cbb70d8042154b19964269f337488c 100755
--- a/examples/VacuumSpherical_3D/run.sh
+++ b/examples/HydroTests/VacuumSpherical_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 16 vacuum.yml 2>&1 | tee output.log
+../../swift --hydro --threads=16 vacuum.yml 2>&1 | tee output.log
 
 # Get the reference solution if it is not present.
 if [ ! -e vacuumSpherical3D_exact.txt ]
diff --git a/examples/VacuumSpherical_3D/vacuum.yml b/examples/HydroTests/VacuumSpherical_3D/vacuum.yml
similarity index 98%
rename from examples/VacuumSpherical_3D/vacuum.yml
rename to examples/HydroTests/VacuumSpherical_3D/vacuum.yml
index 8792f029d97f413882ae0ea6c8603d64efaddbfa..851abf74441a48a58eac551bd0526f1d4b6e4ce0 100644
--- a/examples/VacuumSpherical_3D/vacuum.yml
+++ b/examples/HydroTests/VacuumSpherical_3D/vacuum.yml
@@ -32,4 +32,6 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./vacuum.hdf5       # The file to read
+  periodic:   1
 
+  
\ No newline at end of file
diff --git a/examples/Vacuum_1D/makeIC.py b/examples/HydroTests/Vacuum_1D/makeIC.py
similarity index 96%
rename from examples/Vacuum_1D/makeIC.py
rename to examples/HydroTests/Vacuum_1D/makeIC.py
index 067304ec951182da862cf2812cdc68a51a56d23b..5b057b340cbfa9718fb230ab1af839bc63678032 100644
--- a/examples/Vacuum_1D/makeIC.py
+++ b/examples/HydroTests/Vacuum_1D/makeIC.py
@@ -63,10 +63,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 1
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Vacuum_1D/plotSolution.py b/examples/HydroTests/Vacuum_1D/plotSolution.py
similarity index 100%
rename from examples/Vacuum_1D/plotSolution.py
rename to examples/HydroTests/Vacuum_1D/plotSolution.py
diff --git a/examples/Vacuum_1D/run.sh b/examples/HydroTests/Vacuum_1D/run.sh
similarity index 80%
rename from examples/Vacuum_1D/run.sh
rename to examples/HydroTests/Vacuum_1D/run.sh
index b141f91f877c5b553281e53cdf02fbea948b0a97..c5f7e0344e7a6517dae51390e2eae4acd80a5f5f 100755
--- a/examples/Vacuum_1D/run.sh
+++ b/examples/HydroTests/Vacuum_1D/run.sh
@@ -8,7 +8,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 1 vacuum.yml 2>&1 | tee output.log
+../../swift --hydro --threads=1 vacuum.yml 2>&1 | tee output.log
 
 # Plot the result
 python plotSolution.py 1
diff --git a/examples/Vacuum_1D/vacuum.yml b/examples/HydroTests/Vacuum_1D/vacuum.yml
similarity index 98%
rename from examples/Vacuum_1D/vacuum.yml
rename to examples/HydroTests/Vacuum_1D/vacuum.yml
index 5ef5ce3da68febb086a14ad1a2207711f680d9ff..0be6427e50e1f674f7f59d4b865f2c4f9605a378 100644
--- a/examples/Vacuum_1D/vacuum.yml
+++ b/examples/HydroTests/Vacuum_1D/vacuum.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./vacuum.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/Vacuum_2D/getGlass.sh b/examples/HydroTests/Vacuum_2D/getGlass.sh
similarity index 100%
rename from examples/Vacuum_2D/getGlass.sh
rename to examples/HydroTests/Vacuum_2D/getGlass.sh
diff --git a/examples/Vacuum_2D/makeIC.py b/examples/HydroTests/Vacuum_2D/makeIC.py
similarity index 96%
rename from examples/Vacuum_2D/makeIC.py
rename to examples/HydroTests/Vacuum_2D/makeIC.py
index ef267c092cafdb95457d5adad1e6858df0e14bd3..4d9181b83c0e383d0e3fb0dc6ca79dbda6f88891 100644
--- a/examples/Vacuum_2D/makeIC.py
+++ b/examples/HydroTests/Vacuum_2D/makeIC.py
@@ -71,10 +71,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 2
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Vacuum_2D/plotSolution.py b/examples/HydroTests/Vacuum_2D/plotSolution.py
similarity index 100%
rename from examples/Vacuum_2D/plotSolution.py
rename to examples/HydroTests/Vacuum_2D/plotSolution.py
diff --git a/examples/Vacuum_2D/run.sh b/examples/HydroTests/Vacuum_2D/run.sh
similarity index 85%
rename from examples/Vacuum_2D/run.sh
rename to examples/HydroTests/Vacuum_2D/run.sh
index 5c0b2ca5e19e33e813b7ff478ed4494752c0a2a5..08f3ba19d69865112db6ac68f0264e19b6b4363b 100755
--- a/examples/Vacuum_2D/run.sh
+++ b/examples/HydroTests/Vacuum_2D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 4 vacuum.yml 2>&1 | tee output.log
+../../swift --hydro --threads=4 vacuum.yml 2>&1 | tee output.log
 
 # Plot the result
 python plotSolution.py 1
diff --git a/examples/Vacuum_2D/vacuum.yml b/examples/HydroTests/Vacuum_2D/vacuum.yml
similarity index 98%
rename from examples/Vacuum_2D/vacuum.yml
rename to examples/HydroTests/Vacuum_2D/vacuum.yml
index 5ef5ce3da68febb086a14ad1a2207711f680d9ff..0be6427e50e1f674f7f59d4b865f2c4f9605a378 100644
--- a/examples/Vacuum_2D/vacuum.yml
+++ b/examples/HydroTests/Vacuum_2D/vacuum.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./vacuum.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/Vacuum_3D/getGlass.sh b/examples/HydroTests/Vacuum_3D/getGlass.sh
similarity index 100%
rename from examples/Vacuum_3D/getGlass.sh
rename to examples/HydroTests/Vacuum_3D/getGlass.sh
diff --git a/examples/Vacuum_3D/makeIC.py b/examples/HydroTests/Vacuum_3D/makeIC.py
similarity index 96%
rename from examples/Vacuum_3D/makeIC.py
rename to examples/HydroTests/Vacuum_3D/makeIC.py
index 09c3cb4d6f5525d54fab59643ab4a7d0540a2a92..cee2d28d5190305a3536315001453e7595b7c7f2 100644
--- a/examples/Vacuum_3D/makeIC.py
+++ b/examples/HydroTests/Vacuum_3D/makeIC.py
@@ -73,10 +73,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-#Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 #Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/Vacuum_3D/plotSolution.py b/examples/HydroTests/Vacuum_3D/plotSolution.py
similarity index 100%
rename from examples/Vacuum_3D/plotSolution.py
rename to examples/HydroTests/Vacuum_3D/plotSolution.py
diff --git a/examples/Vacuum_3D/run.sh b/examples/HydroTests/Vacuum_3D/run.sh
similarity index 85%
rename from examples/Vacuum_3D/run.sh
rename to examples/HydroTests/Vacuum_3D/run.sh
index 5029626f67659bba1f22600bb5bd38859dd805ce..b75803f97fdd60a78efe9b05267e265d51fb1f1f 100755
--- a/examples/Vacuum_3D/run.sh
+++ b/examples/HydroTests/Vacuum_3D/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -s -t 16 vacuum.yml 2>&1 | tee output.log
+../../swift --hydro --threads=16 vacuum.yml 2>&1 | tee output.log
 
 # Plot the result
 python plotSolution.py 1
diff --git a/examples/Vacuum_3D/vacuum.yml b/examples/HydroTests/Vacuum_3D/vacuum.yml
similarity index 98%
rename from examples/Vacuum_3D/vacuum.yml
rename to examples/HydroTests/Vacuum_3D/vacuum.yml
index cf44d2441f5009d2fc75084a2c872e3618e40912..49bd9747d677bfdf64009bd1e02a86bc52a8db9c 100644
--- a/examples/Vacuum_3D/vacuum.yml
+++ b/examples/HydroTests/Vacuum_3D/vacuum.yml
@@ -32,4 +32,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./vacuum.hdf5       # The file to read
+  periodic:   1
 
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/angularmomentum.py b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/angularmomentum.py
new file mode 100755
index 0000000000000000000000000000000000000000..4398dfeb8b079143886c5565e7667f72fc0bdcef
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/angularmomentum.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+import scipy.optimize as sco
+
+
+Nmax = 2001
+steps = 10
+angmomcomp = False
+
+iterarray = np.arange(0, Nmax + 1, steps)
+Lxtot = np.zeros(len(iterarray))
+Lytot = np.zeros(len(iterarray))
+Lztot = np.zeros(len(iterarray))
+Ltot = np.zeros(len(iterarray))
+time_array = np.zeros(len(iterarray))
+
+
+for i in iterarray:
+    f = h5py.File("output_%04d.hdf5" % i, "r")
+
+    boxsize = f["Header"].attrs["BoxSize"] / 2.0
+
+    time_array[int(i / steps)] = f["Header"].attrs["Time"]
+
+    particles = f["PartType4"]
+    coordinates = particles["Coordinates"][:, :]
+    velocities = particles["Velocities"][:, :]
+    masses = particles["Masses"][:]
+
+    R = (
+        (coordinates[:, 0] - boxsize[0]) ** 2 + (coordinates[:, 1] - boxsize[1]) ** 2
+    ) ** 0.5
+    X = np.abs(coordinates[:, 0] - boxsize[0])
+    Y = np.abs(coordinates[:, 1] - boxsize[1])
+    Z = np.abs(coordinates[:, 2] - boxsize[2])
+
+    vx = velocities[:, 0]
+    vy = velocities[:, 1]
+    vz = velocities[:, 2]
+
+    Lx = (Y * vz - Z * vy) * masses
+    Ly = (Z * vx - X * vz) * masses
+    Lz = (X * vy - Y * vx) * masses
+
+    L = (Lx ** 2 + Ly ** 2 + Lz ** 2) ** 0.5
+
+    Lxtot[int(i / steps)] = np.sum(Lx)
+    Lytot[int(i / steps)] = np.sum(Ly)
+    Lztot[int(i / steps)] = np.sum(Lz)
+    Ltot[int(i / steps)] = np.sum(L)
+
+time_array[-1] = 2.0
+if angmomcomp:
+    plt.plot(time_array, Lxtot / Lxtot[0] - 1, label="Lx total")
+    plt.plot(time_array, Lytot / Lytot[0] - 1, label="Ly total")
+    plt.plot(time_array, Lztot / Lztot[0] - 1, label="Lz total")
+plt.plot(time_array, Ltot / Ltot[0] - 1, label="L total")
+plt.xlabel("Time")
+plt.ylabel("ratio between current and zero angular momentum")
+plt.legend()
+plt.show()
+
+plt.semilogy(time_array, np.absolute(Ltot / Ltot[0] - 1))
+plt.xlabel("Time (Gyr)")
+plt.ylabel("Fractional change of total angular momentum")
+plt.savefig("Total_angular_momentum.png")
+plt.show()
+plt.close()
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/getIC.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a3d16db27aac06abda683a7bd75e72a275f8b9d4
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/3e11-star-only-DM-halo-galaxy.hdf5
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/isolated_galaxy.yml b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/isolated_galaxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..dccfb28a3f1c888d2a83b5e28b759a30a6928754
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/isolated_galaxy.yml
@@ -0,0 +1,43 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.9891E43   # 10^10 solar masses 
+  UnitLength_in_cgs:   3.08567758E21   # 1 kpc 
+  UnitVelocity_in_cgs: 1E5   # km/s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters for the self-gravity scheme
+Gravity:
+  mesh_side_length:       32        # Number of cells along each axis for the periodic gravity mesh.
+  eta:          0.025               # Constant dimensionless multiplier for time integration.
+  theta:        0.7                 # Opening angle (Multipole acceptance criterion).
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+
+# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
+TimeIntegration:
+  time_begin:        0.    # The starting time of the simulation (in internal units).
+  time_end:          1.    # The end time of the simulation (in internal units).
+  dt_min:            1e-6  # The minimal time-step size of the simulation (in internal units).
+  dt_max:            1e-2  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:   output      # Common part of the name of output files
+  time_first: 0.          # (Optional) Time of the first output if non-cosmological time-integration (in internal units)
+  delta_time: 0.001        # Time difference between consecutive outputs (in internal units)
+
+Scheduler:
+  max_top_level_cells:  96
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1e-2     # Time between statistics output
+  time_first:             0.     # (Optional) Time of the first stats output if non-cosmological time-integration (in internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  3e11-star-only-DM-halo-galaxy.hdf5  # The file to read
+  periodic:                    0    # Are we running with periodic ICs?
+
+ 
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/profilefit.py b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/profilefit.py
new file mode 100755
index 0000000000000000000000000000000000000000..e7755062ea45de4f42716b14c5896b0da676f001
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/profilefit.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from matplotlib.colors import BoundaryNorm
+from matplotlib.ticker import MaxNLocator
+import scipy.optimize as sco
+import os
+
+
+def linearfunc(x, a, b):
+    return a * x + b
+
+
+def radialfunc(r, h, A):
+    return A * np.exp(-r / h) * r
+
+
+def verticalfunc(z, A, z0, zoff):
+    return 2 * A * np.exp(-(z - zoff) / z0)
+
+
+def verticalfunc2(z, A, z0):
+    return 2 * A * np.exp(-(z) / z0)
+
+
+def verticalfunc3(z, A, z0, zoff, b):
+    return 2 * A * np.exp(-(z - zoff) / z0) + b
+
+
+Nmax = 2001
+steps = 10
+storefits = False
+logfit = True
+normalfit = False
+
+# if the user wants to store the individual fits
+if storefits:
+    if not os.path.exists("radial"):
+        os.mkdir("radial")
+        os.mkdir("vertical")
+        os.mkdir("histsnap")
+
+
+# Initialize the arrays
+R_ideal = np.linspace(0, 40, 100)
+Z_ideal = np.linspace(0, 10, 100)
+
+iterarray = np.arange(0, Nmax + 1, steps)
+
+Z0t = np.zeros(len(iterarray))
+Z0terr = np.zeros(len(iterarray))
+h0t = np.zeros(len(iterarray))
+h0terr = np.zeros(len(iterarray))
+Ar = np.zeros(len(iterarray))
+Arerr = np.zeros(len(iterarray))
+Az = np.zeros(len(iterarray))
+Azerr = np.zeros(len(iterarray))
+time_array = np.zeros(len(iterarray))
+
+ar = np.zeros(len(iterarray))
+arerr = np.zeros(len(iterarray))
+br = np.zeros(len(iterarray))
+brerr = np.zeros(len(iterarray))
+az = np.zeros(len(iterarray))
+azerr = np.zeros(len(iterarray))
+bz = np.zeros(len(iterarray))
+bzerr = np.zeros(len(iterarray))
+eps = 1e-6
+
+
+for i in iterarray:
+    # Getting the data from the snapshots
+    f = h5py.File("output_%04d.hdf5" % i, "r")
+
+    boxsize = f["Header"].attrs["BoxSize"] / 2.0
+
+    time_array[int(i / steps)] = f["Header"].attrs["Time"]
+
+    particles = f["PartType4"]
+    coordinates = particles["Coordinates"][:, :]
+    masses = particles["Masses"][:]
+
+    R = (
+        (coordinates[:, 0] - boxsize[0]) ** 2 + (coordinates[:, 1] - boxsize[1]) ** 2
+    ) ** 0.5
+    Z = np.abs(coordinates[:, 2] - boxsize[2])
+
+    # Bin the coordinates to make them suitable for fitting
+    Rhist = np.histogram(R, bins=100, range=[0, 40], normed=True)
+    Zhist = np.histogram(Z, bins=100, range=[0, 10.0], normed=True)
+
+    # Create correct variables for fitting
+    Ry = Rhist[0]
+    Rx = (Rhist[1][1:] + Rhist[1][: len(Rhist[0])]) / 2.0
+
+    Zy = Zhist[0]
+    Zx = (Zhist[1][1:] + Zhist[1][: len(Zhist[0])]) / 2.0
+
+    # Fit with two methods: non-linear LSQ and linear LSQ in log space
+    bestsolR = sco.curve_fit(radialfunc, Rx[10:], Ry[10:], p0=[2.0, 0.2])
+    bestsolZ = sco.curve_fit(verticalfunc, Zx[40:], Zy[40:])
+    bestsolRlog = sco.curve_fit(linearfunc, Rx[10:], np.log10(Ry[10:] + eps))
+    bestsolZlog = sco.curve_fit(linearfunc, Zx[40:], np.log10(Zy[40:] + eps))
+
+    # Store variables
+    h0t[int(i / steps)] = bestsolR[0][0]
+    Z0t[int(i / steps)] = bestsolZ[0][1]
+    Ar[int(i / steps)] = bestsolR[0][1]
+    Az[int(i / steps)] = bestsolZ[0][0]
+    Z0terr[int(i / steps)] = (bestsolZ[1][1, 1]) ** 0.5
+    h0terr[int(i / steps)] = (bestsolR[1][0, 0]) ** 0.5
+    Arerr[int(i / steps)] = (bestsolR[1][1, 1]) ** 0.5
+    Azerr[int(i / steps)] = (bestsolZ[1][0, 0]) ** 0.5
+
+    ar[int(i / steps)] = bestsolRlog[0][0]
+    arerr[int(i / steps)] = (bestsolRlog[1][0, 0]) ** 0.5
+    br[int(i / steps)] = bestsolRlog[0][1]
+    brerr[int(i / steps)] = (bestsolRlog[1][1, 1]) ** 0.5
+    az[int(i / steps)] = bestsolZlog[0][0]
+    azerr[int(i / steps)] = (bestsolZlog[1][0, 0]) ** 0.5
+    bz[int(i / steps)] = bestsolZlog[0][1]
+    bzerr[int(i / steps)] = (bestsolZlog[1][1, 1]) ** 0.5
+
+    if storefits:
+        plt.step(Rx, Ry)
+        plt.plot(
+            R_ideal,
+            radialfunc(R_ideal, bestsolR[0][0], bestsolR[0][1]),
+            label="Non linear LSQ",
+        )
+        plt.plot(
+            R_ideal,
+            10 ** (linearfunc(R_ideal, bestsolRlog[0][0], bestsolRlog[0][1])),
+            label="Linear LSQ",
+        )
+        plt.xlim(0, 40)
+        plt.ylim(0, 0.25)
+        plt.xlabel("R (kpc)")
+        plt.ylabel("Probability")
+        plt.savefig("./radial/radialsnap%04d.png" % i)
+        plt.close()
+
+        plt.step(Zx, Zy)
+        plt.plot(
+            Z_ideal,
+            verticalfunc(Z_ideal, bestsolZ[0][0], bestsolZ[0][1], bestsolZ[0][2]),
+            label="Non linear LSQ",
+        )
+        plt.plot(
+            Z_ideal,
+            10 ** (linearfunc(Z_ideal, bestsolZlog[0][0], bestsolZlog[0][1])),
+            label="Linear LSQ",
+        )
+        plt.xlim(0, 10.0)
+        plt.ylim(0, 0.6)
+        plt.xlabel("z (kpc)")
+        plt.ylabel("Probability")
+        plt.savefig("./vertical/verticalsnap%04d.png" % i)
+        plt.close()
+
+time_array[-1] = 2.0
+
+ax = plt.subplot(111)
+ax.set_yscale("log")
+if logfit:
+    plt.errorbar(
+        time_array,
+        np.absolute(az / (az[0]) - 1),
+        yerr=azerr / (az[0]),
+        label="z0 scale height (Log space)",
+    )
+    plt.errorbar(
+        time_array,
+        np.absolute(ar / (ar[0]) - 1),
+        yerr=arerr / (ar[0]),
+        label="h scale length (Log space)",
+    )
+if normalfit:
+    plt.errorbar(
+        time_array,
+        np.absolute(Z0t / (Z0t[0]) - 1),
+        yerr=Z0terr / (Z0t[0]),
+        label="z0 scale height (normal space)",
+    )
+    plt.errorbar(
+        time_array,
+        np.absolute(h0t / (h0t[0]) - 1),
+        yerr=h0terr / (h0t[0]),
+        label="h scale height (normal space)",
+    )
+ax.set_xlabel("Time (Gyr)")
+ax.set_ylabel("Fractional difference")
+plt.legend()
+plt.savefig("Fitdifference-witherror.pdf")
+plt.close()
+
+
+ax = plt.subplot(111)
+ax.set_yscale("log")
+if logfit:
+    plt.plot(
+        time_array, np.absolute(az / (az[0]) - 1), label="z0 scale height (Log space)"
+    )
+    plt.plot(
+        time_array, np.absolute(ar / (ar[0]) - 1), label="h scale length (Log space)"
+    )
+if normalfit:
+    plt.plot(
+        time_array,
+        np.absolute(Z0t / (Z0t[0]) - 1),
+        label="z0 scale height (normal space)",
+    )
+    plt.plot(
+        time_array,
+        np.absolute(h0t / (h0t[0]) - 1),
+        label="h scale height (normal space)",
+    )
+ax.set_xlabel("Time (Gyr)")
+ax.set_ylabel("Fractional difference")
+plt.legend()
+plt.savefig("Fitdifference.pdf")
+plt.show()
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/run.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..604c01d09a3553c598bb5691e0078ae52592670d
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_dmparticles/run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if [ ! -e 3e11-star-only-DM-halo-galaxy.hdf5 ]
+then
+    echo "Fetching initial conditions for the isolated galaxy with an external potential ..."
+    ./getIC.sh
+fi 
+
+../../swift --external-gravity --self-gravity --stars --threads=16 isolated_galaxy.yml 2>&1 | tee output.log
+
+
+echo "Make plots of conservation of total angular momentum" 
+if command -v python3 &>/dev/null; then
+    python3 angularmomentum.py 
+else
+    python angularmomentum.py 
+fi
+
+echo "Make plots of change of vertical and radial profile"
+if command -v python3 &>/dev/null; then
+    python3 profilefit.py 
+else
+    python profilefit.py 
+fi
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_potential/angularmomentum.py b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/angularmomentum.py
new file mode 100755
index 0000000000000000000000000000000000000000..4398dfeb8b079143886c5565e7667f72fc0bdcef
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/angularmomentum.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+import scipy.optimize as sco
+
+
+Nmax = 2001
+steps = 10
+angmomcomp = False
+
+iterarray = np.arange(0, Nmax + 1, steps)
+Lxtot = np.zeros(len(iterarray))
+Lytot = np.zeros(len(iterarray))
+Lztot = np.zeros(len(iterarray))
+Ltot = np.zeros(len(iterarray))
+time_array = np.zeros(len(iterarray))
+
+
+for i in iterarray:
+    f = h5py.File("output_%04d.hdf5" % i, "r")
+
+    boxsize = f["Header"].attrs["BoxSize"] / 2.0
+
+    time_array[int(i / steps)] = f["Header"].attrs["Time"]
+
+    particles = f["PartType4"]
+    coordinates = particles["Coordinates"][:, :]
+    velocities = particles["Velocities"][:, :]
+    masses = particles["Masses"][:]
+
+    R = (
+        (coordinates[:, 0] - boxsize[0]) ** 2 + (coordinates[:, 1] - boxsize[1]) ** 2
+    ) ** 0.5
+    X = np.abs(coordinates[:, 0] - boxsize[0])
+    Y = np.abs(coordinates[:, 1] - boxsize[1])
+    Z = np.abs(coordinates[:, 2] - boxsize[2])
+
+    vx = velocities[:, 0]
+    vy = velocities[:, 1]
+    vz = velocities[:, 2]
+
+    Lx = (Y * vz - Z * vy) * masses
+    Ly = (Z * vx - X * vz) * masses
+    Lz = (X * vy - Y * vx) * masses
+
+    L = (Lx ** 2 + Ly ** 2 + Lz ** 2) ** 0.5
+
+    Lxtot[int(i / steps)] = np.sum(Lx)
+    Lytot[int(i / steps)] = np.sum(Ly)
+    Lztot[int(i / steps)] = np.sum(Lz)
+    Ltot[int(i / steps)] = np.sum(L)
+
+time_array[-1] = 2.0
+if angmomcomp:
+    plt.plot(time_array, Lxtot / Lxtot[0] - 1, label="Lx total")
+    plt.plot(time_array, Lytot / Lytot[0] - 1, label="Ly total")
+    plt.plot(time_array, Lztot / Lztot[0] - 1, label="Lz total")
+plt.plot(time_array, Ltot / Ltot[0] - 1, label="L total")
+plt.xlabel("Time")
+plt.ylabel("ratio between current and zero angular momentum")
+plt.legend()
+plt.show()
+
+plt.semilogy(time_array, np.absolute(Ltot / Ltot[0] - 1))
+plt.xlabel("Time (Gyr)")
+plt.ylabel("Fractional change of total angular momentum")
+plt.savefig("Total_angular_momentum.png")
+plt.show()
+plt.close()
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_potential/getIC.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a88fb05df5663993ddadd87be476b0444ac1d132
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/getIC.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/3e11-star-only-static-potential-galaxy.hdf5
+
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_potential/isolated_galaxy.yml b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/isolated_galaxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..deee132ee38ae5e04397839a21a677f4851e6bac
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/isolated_galaxy.yml
@@ -0,0 +1,56 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.9891E43   # 10^10 solar masses 
+  UnitLength_in_cgs:   3.08567758E21   # 1 kpc 
+  UnitVelocity_in_cgs: 1E5   # km/s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters for the self-gravity scheme
+Gravity:
+  mesh_side_length:       32        # Number of cells along each axis for the periodic gravity mesh.
+  eta:          0.025               # Constant dimensionless multiplier for time integration.
+  theta:        0.7                 # Opening angle (Multipole acceptance criterion).
+  comoving_softening:     0.0300 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0300    # Physical softening length (in internal units).
+
+# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
+TimeIntegration:
+  time_begin:        0.    # The starting time of the simulation (in internal units).
+  time_end:          2.    # The end time of the simulation (in internal units).
+  dt_min:            1e-6  # The minimal time-step size of the simulation (in internal units).
+  dt_max:            1e-2  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:   output      # Common part of the name of output files
+  time_first: 0.          # (Optional) Time of the first output if non-cosmological time-integration (in internal units)
+  delta_time: 0.001        # Time difference between consecutive outputs (in internal units)
+
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1e-2     # Time between statistics output
+  time_first:             0.     # (Optional) Time of the first stats output if non-cosmological time-integration (in internal units)
+
+Scheduler:
+  max_top_level_cells:   96
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  3e11-star-only-static-potential-galaxy.hdf5  # The file to read
+  periodic:                    0    # Are we running with periodic ICs?
+
+# Hernquist potential parameters
+HernquistPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
+  position:        [0.,0.,0.]    # Location of centre of isothermal potential with respect to centre of the box (if 0) otherwise absolute (if 1) (internal units)
+  idealizeddisk:   1        # Run with an idealized galaxy disk
+  M200:            30.0   # M200 of the galaxy disk
+  h:               0.704    # reduced Hubble constant (value does not specify the used units!)
+  concentration:   7.1      # concentration of the Halo
+  diskfraction:              0.0434370991372   # Disk mass fraction
+  bulgefraction:              0.00705852860979  # Bulge mass fraction
+  timestep_mult:   0.01     # Dimensionless pre-factor for the time-step condition, basically determines the fraction of the orbital time we use to do the time integration
+  epsilon:         0.030      # Softening size (internal units)
+ 
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_potential/profilefit.py b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/profilefit.py
new file mode 100755
index 0000000000000000000000000000000000000000..e7755062ea45de4f42716b14c5896b0da676f001
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/profilefit.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from matplotlib.colors import BoundaryNorm
+from matplotlib.ticker import MaxNLocator
+import scipy.optimize as sco
+import os
+
+
+def linearfunc(x, a, b):
+    return a * x + b
+
+
+def radialfunc(r, h, A):
+    return A * np.exp(-r / h) * r
+
+
+def verticalfunc(z, A, z0, zoff):
+    return 2 * A * np.exp(-(z - zoff) / z0)
+
+
+def verticalfunc2(z, A, z0):
+    return 2 * A * np.exp(-(z) / z0)
+
+
+def verticalfunc3(z, A, z0, zoff, b):
+    return 2 * A * np.exp(-(z - zoff) / z0) + b
+
+
+Nmax = 2001
+steps = 10
+storefits = False
+logfit = True
+normalfit = False
+
+# if the user wants to store the individual fits
+if storefits:
+    if not os.path.exists("radial"):
+        os.mkdir("radial")
+        os.mkdir("vertical")
+        os.mkdir("histsnap")
+
+
+# Initialize the arrays
+R_ideal = np.linspace(0, 40, 100)
+Z_ideal = np.linspace(0, 10, 100)
+
+iterarray = np.arange(0, Nmax + 1, steps)
+
+Z0t = np.zeros(len(iterarray))
+Z0terr = np.zeros(len(iterarray))
+h0t = np.zeros(len(iterarray))
+h0terr = np.zeros(len(iterarray))
+Ar = np.zeros(len(iterarray))
+Arerr = np.zeros(len(iterarray))
+Az = np.zeros(len(iterarray))
+Azerr = np.zeros(len(iterarray))
+time_array = np.zeros(len(iterarray))
+
+ar = np.zeros(len(iterarray))
+arerr = np.zeros(len(iterarray))
+br = np.zeros(len(iterarray))
+brerr = np.zeros(len(iterarray))
+az = np.zeros(len(iterarray))
+azerr = np.zeros(len(iterarray))
+bz = np.zeros(len(iterarray))
+bzerr = np.zeros(len(iterarray))
+eps = 1e-6
+
+
+for i in iterarray:
+    # Getting the data from the snapshots
+    f = h5py.File("output_%04d.hdf5" % i, "r")
+
+    boxsize = f["Header"].attrs["BoxSize"] / 2.0
+
+    time_array[int(i / steps)] = f["Header"].attrs["Time"]
+
+    particles = f["PartType4"]
+    coordinates = particles["Coordinates"][:, :]
+    masses = particles["Masses"][:]
+
+    R = (
+        (coordinates[:, 0] - boxsize[0]) ** 2 + (coordinates[:, 1] - boxsize[1]) ** 2
+    ) ** 0.5
+    Z = np.abs(coordinates[:, 2] - boxsize[2])
+
+    # Bin the coordinates to make them suitable for fitting
+    Rhist = np.histogram(R, bins=100, range=[0, 40], normed=True)
+    Zhist = np.histogram(Z, bins=100, range=[0, 10.0], normed=True)
+
+    # Create correct variables for fitting
+    Ry = Rhist[0]
+    Rx = (Rhist[1][1:] + Rhist[1][: len(Rhist[0])]) / 2.0
+
+    Zy = Zhist[0]
+    Zx = (Zhist[1][1:] + Zhist[1][: len(Zhist[0])]) / 2.0
+
+    # Fit with two methods: non-linear LSQ and linear LSQ in log space
+    bestsolR = sco.curve_fit(radialfunc, Rx[10:], Ry[10:], p0=[2.0, 0.2])
+    bestsolZ = sco.curve_fit(verticalfunc, Zx[40:], Zy[40:])
+    bestsolRlog = sco.curve_fit(linearfunc, Rx[10:], np.log10(Ry[10:] + eps))
+    bestsolZlog = sco.curve_fit(linearfunc, Zx[40:], np.log10(Zy[40:] + eps))
+
+    # Store variables
+    h0t[int(i / steps)] = bestsolR[0][0]
+    Z0t[int(i / steps)] = bestsolZ[0][1]
+    Ar[int(i / steps)] = bestsolR[0][1]
+    Az[int(i / steps)] = bestsolZ[0][0]
+    Z0terr[int(i / steps)] = (bestsolZ[1][1, 1]) ** 0.5
+    h0terr[int(i / steps)] = (bestsolR[1][0, 0]) ** 0.5
+    Arerr[int(i / steps)] = (bestsolR[1][1, 1]) ** 0.5
+    Azerr[int(i / steps)] = (bestsolZ[1][0, 0]) ** 0.5
+
+    ar[int(i / steps)] = bestsolRlog[0][0]
+    arerr[int(i / steps)] = (bestsolRlog[1][0, 0]) ** 0.5
+    br[int(i / steps)] = bestsolRlog[0][1]
+    brerr[int(i / steps)] = (bestsolRlog[1][1, 1]) ** 0.5
+    az[int(i / steps)] = bestsolZlog[0][0]
+    azerr[int(i / steps)] = (bestsolZlog[1][0, 0]) ** 0.5
+    bz[int(i / steps)] = bestsolZlog[0][1]
+    bzerr[int(i / steps)] = (bestsolZlog[1][1, 1]) ** 0.5
+
+    if storefits:
+        plt.step(Rx, Ry)
+        plt.plot(
+            R_ideal,
+            radialfunc(R_ideal, bestsolR[0][0], bestsolR[0][1]),
+            label="Non linear LSQ",
+        )
+        plt.plot(
+            R_ideal,
+            10 ** (linearfunc(R_ideal, bestsolRlog[0][0], bestsolRlog[0][1])),
+            label="Linear LSQ",
+        )
+        plt.xlim(0, 40)
+        plt.ylim(0, 0.25)
+        plt.xlabel("R (kpc)")
+        plt.ylabel("Probability")
+        plt.savefig("./radial/radialsnap%04d.png" % i)
+        plt.close()
+
+        plt.step(Zx, Zy)
+        plt.plot(
+            Z_ideal,
+            verticalfunc(Z_ideal, bestsolZ[0][0], bestsolZ[0][1], bestsolZ[0][2]),
+            label="Non linear LSQ",
+        )
+        plt.plot(
+            Z_ideal,
+            10 ** (linearfunc(Z_ideal, bestsolZlog[0][0], bestsolZlog[0][1])),
+            label="Linear LSQ",
+        )
+        plt.xlim(0, 10.0)
+        plt.ylim(0, 0.6)
+        plt.xlabel("z (kpc)")
+        plt.ylabel("Probability")
+        plt.savefig("./vertical/verticalsnap%04d.png" % i)
+        plt.close()
+
+time_array[-1] = 2.0
+
+ax = plt.subplot(111)
+ax.set_yscale("log")
+if logfit:
+    plt.errorbar(
+        time_array,
+        np.absolute(az / (az[0]) - 1),
+        yerr=azerr / (az[0]),
+        label="z0 scale height (Log space)",
+    )
+    plt.errorbar(
+        time_array,
+        np.absolute(ar / (ar[0]) - 1),
+        yerr=arerr / (ar[0]),
+        label="h scale length (Log space)",
+    )
+if normalfit:
+    plt.errorbar(
+        time_array,
+        np.absolute(Z0t / (Z0t[0]) - 1),
+        yerr=Z0terr / (Z0t[0]),
+        label="z0 scale height (normal space)",
+    )
+    plt.errorbar(
+        time_array,
+        np.absolute(h0t / (h0t[0]) - 1),
+        yerr=h0terr / (h0t[0]),
+        label="h scale height (normal space)",
+    )
+ax.set_xlabel("Time (Gyr)")
+ax.set_ylabel("Fractional difference")
+plt.legend()
+plt.savefig("Fitdifference-witherror.pdf")
+plt.close()
+
+
+ax = plt.subplot(111)
+ax.set_yscale("log")
+if logfit:
+    plt.plot(
+        time_array, np.absolute(az / (az[0]) - 1), label="z0 scale height (Log space)"
+    )
+    plt.plot(
+        time_array, np.absolute(ar / (ar[0]) - 1), label="h scale length (Log space)"
+    )
+if normalfit:
+    plt.plot(
+        time_array,
+        np.absolute(Z0t / (Z0t[0]) - 1),
+        label="z0 scale height (normal space)",
+    )
+    plt.plot(
+        time_array,
+        np.absolute(h0t / (h0t[0]) - 1),
+        label="h scale height (normal space)",
+    )
+ax.set_xlabel("Time (Gyr)")
+ax.set_ylabel("Fractional difference")
+plt.legend()
+plt.savefig("Fitdifference.pdf")
+plt.show()
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_potential/run.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8c0f2011d4a2b633d69ab454d0309b98a05b7f24
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_potential/run.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+if [ ! -e 3e11-star-only-static-potential-galaxy.hdf5 ]
+then
+    echo "Fetching initial conditions for the isolated galaxy with an external potential ..."
+    ./getIC.sh
+fi 
+
+../../swift --external-gravity --self-gravity --stars --threads=16 isolated_galaxy.yml 2>&1 | tee output.log
+
+
+echo "Make plots of conservation of total angular momentum" 
+if command -v python3 &>/dev/null; then
+    python3 angularmomentum.py 
+else
+    python angularmomentum.py 
+fi
+
+echo "Make plots of change of vertical and radial profile"
+if command -v python3 &>/dev/null; then
+    python3 profilefit.py 
+else
+    python profilefit.py 
+fi
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/README b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/README
new file mode 100644
index 0000000000000000000000000000000000000000..719ed3356701983e60e0f032791e4bb9a0524978
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/README
@@ -0,0 +1,43 @@
+Isolated Galaxy generated by the MakeNewDisk code from Springel, Di Matteo &
+Hernquist (2005). The analysis done in this example is similar to the work done
+by Schaye and Dalla Vecchia (2008) (After this SD08). The default example runs
+the simulation for a galaxy with similar mass of their fiducial model and should
+produce plots similar to their middle panel Figure 4. The code needs to be
+configured to run with the Hernquist external potential as well as the cooling &
+star-formation model of interest. Using the EAGLE model allows to reproduce the
+results of SD08.
+
+The code can also be run for other situations to check to verify the law using
+different parameters, changes that were done in SD08 are given by:
+ - gas fraction of 10% instead of 30%, change the IC to f10.hdf5, see getIC.sh,
+   should reproduce something similar to Figure 4 left hand panel. Requires 
+   change of fg=.1
+ - gas fraction of 90% instead of 30%, change the IC to f90.hdf5, see getIC.sh,
+   should reproduce something similar to Figure 4 right hand panel. Requires 
+   change of fg=.9
+ - Changing the effective equation of state to adiabatic, Jeans_gamma_effective 
+   = 1.666667. Should result in something similar to Figure 5 left hand panel
+   of SD08.
+ - Changing the effective equation of state to isothermal, Jeans_gamma_effective 
+   = 1.0000. Should result in something similar to Figure 5 middle hand panel
+   of SD08. 
+ - Changing the slope of the Kennicutt-Schmidt law to 1.7, SchmidtLawExponent = 
+   1.7, this should result in a plot similar to Figure 6 of SD08.
+ - Increasing the density threshold by a factor of 10. thresh_norm_HpCM3 = 1.0,
+   should reproduce plot similar to Figure 7.
+ - Decreasing the density threshold by a factor of 10. thresh_norm_HpCM3 = 0.01,
+   should reproduce plot similar to Figure 7.
+ - Running with a lower resolution by a factor of 8, change the IC to lowres8.hdf5,
+   see getIC.sh. 
+ - Running with a lower resolution by a factor of 64, change the IC to lowres64.hdf5,
+   see getIC.sh. 
+ - Running with a lower resolution by a factor of 512, change the IC to lowres512.hdf5,
+   see getIC.sh. 
+
+Other options to verify the correctness of the code are given by changing the following
+parameters:
+  - Changing the normalization to A/2 or 2A.
+  - Running the code with zero metallicity.
+  - Running the code with a factor 6 higher resolution idealized disks, use 
+    highres6.hdf5, see getIC.sh.
+  - Running with different SPH schemes like Anarchy-PU.
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/SFH.py b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/SFH.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa9d9258530396fb7f95237a45af5db9c0da4603
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/SFH.py
@@ -0,0 +1,100 @@
+"""
+Plots the star formation history (SFH) of an isolated galaxy run.
+
+Computes the SFH both from the SFR of the gas particles in the snapshots
+and from the birth times of the star particles, and saves the comparison to SFH.png.
+
+Usage: python3 SFH.py
+
+Written by James Willis (james.s.willis@durham.ac.uk)
+"""
+
+import glob
+import h5py as h5
+import numpy as np
+import matplotlib.pyplot as plt
+
+from tqdm import tqdm
+
+
+def getSFH(filename):
+
+    # Read the data
+    with h5.File(filename, "r") as f:
+        box_size = f["/Header"].attrs["BoxSize"][0]
+        coordinates = f["/PartType4/Coordinates"][:, :]
+        mass = f["/PartType4/Masses"][:]
+        # flag = f["/PartType4/NewStarFlag"][:]
+        birth_time = f["/PartType4/Birth_time"][:]
+
+    absmaxz = 2  # kpc
+    absmaxxy = 10  # kpc
+
+    part_mask = (
+        ((coordinates[:, 0] - box_size / 2.0) > -absmaxxy)
+        & ((coordinates[:, 0] - box_size / 2.0) < absmaxxy)
+        & ((coordinates[:, 1] - box_size / 2.0) > -absmaxxy)
+        & ((coordinates[:, 1] - box_size / 2.0) < absmaxxy)
+        & ((coordinates[:, 2] - box_size / 2.0) > -absmaxz)
+        & ((coordinates[:, 2] - box_size / 2.0) < absmaxz)
+    )  # & (flag==1)
+
+    birth_time = birth_time[part_mask]
+    mass = mass[part_mask]
+
+    histogram = np.histogram(birth_time, bins=200, weights=mass * 2e4, range=[0, 0.1])
+    values = histogram[0]
+    xvalues = (histogram[1][:-1] + histogram[1][1:]) / 2.0
+    return xvalues, values
+
+
+def getsfrsnapwide():
+
+    time = np.arange(1, 101, 1)
+    SFR_sparticles = np.zeros(100)
+    SFR_gparticles = np.zeros(100)
+    new_sparticles = np.zeros(100)
+    previous_mass = 0
+    previous_numb = 0
+    for i in tqdm(range(1, 100)):
+        # Read the data
+        filename = "output_%04d.hdf5" % i
+        with h5.File(filename, "r") as f:
+            box_size = f["/Header"].attrs["BoxSize"][0]
+            coordinates = f["/PartType0/Coordinates"][:, :]
+            SFR = f["/PartType0/SFR"][:]
+            coordinates_star = f["/PartType4/Coordinates"][:, :]
+            masses_star = f["/PartType4/Masses"][:]
+
+        absmaxz = 2  # kpc
+        absmaxxy = 10  # kpc
+
+        part_mask = (
+            ((coordinates[:, 0] - box_size / 2.0) > -absmaxxy)
+            & ((coordinates[:, 0] - box_size / 2.0) < absmaxxy)
+            & ((coordinates[:, 1] - box_size / 2.0) > -absmaxxy)
+            & ((coordinates[:, 1] - box_size / 2.0) < absmaxxy)
+            & ((coordinates[:, 2] - box_size / 2.0) > -absmaxz)
+            & ((coordinates[:, 2] - box_size / 2.0) < absmaxz)
+            & (SFR > 0)
+        )
+
+        SFR = SFR[part_mask]
+
+        total_SFR = np.sum(SFR)
+        SFR_gparticles[i] = total_SFR * 10
+
+    return time[:-1], SFR_gparticles[1:]
+
+
+if __name__ == "__main__":
+
+    time, SFR1 = getsfrsnapwide()  # , SFR2, SFR_error = getsfrsnapwide()
+    time2, SFR3 = getSFH("output_%04d.hdf5" % 100)
+    plt.plot(time2[1:] * 1e3, SFR3[1:], label="Using birth_time of star particles")
+    plt.plot(time, SFR1, label="Using SFR of gas particles", color="g")
+    plt.xlabel("Time (Myr)")
+    plt.ylabel("SFH ($\\rm M_\odot \\rm yr^{-1}$)")
+    plt.ylim(0, 20)
+    plt.legend()
+    plt.savefig("SFH.png")
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/getIC.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..eddbe4cc4a0157e5f888e263a9562a539f178cbe
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/getIC.sh
@@ -0,0 +1,9 @@
+#!/bin/bash 
+
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/fid.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/f10.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/f90.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/lowres8.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/lowres64.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/lowres512.hdf5
+# wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/IsolatedGalaxies/highres6.hdf5
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/isolated_galaxy.yml b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/isolated_galaxy.yml
new file mode 100644
index 0000000000000000000000000000000000000000..7ba1e601c764d9c12b93178efd8226601af8373c
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/isolated_galaxy.yml
@@ -0,0 +1,109 @@
+# Define the system of units to use internally.
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.9891E43   # 10^10 solar masses 
+  UnitLength_in_cgs:   3.08567758E21   # 1 kpc 
+  UnitVelocity_in_cgs: 1E5   # km/s
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Parameters for the self-gravity scheme
+Gravity:
+  mesh_side_length:       32        # Number of cells along each axis for the periodic gravity mesh.
+  eta:          0.025               # Constant dimensionless multiplier for time integration.
+  theta:        0.7                 # Opening angle (Multipole acceptance criterion).
+  comoving_softening:     0.01      # Comoving softening length (in internal units).
+  max_physical_softening: 0.01      # Physical softening length (in internal units).
+
+# Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
+TimeIntegration:
+  time_begin:        0.    # The starting time of the simulation (in internal units).
+  time_end:          0.1   # The end time of the simulation (in internal units).
+  dt_min:            1e-9  # The minimal time-step size of the simulation (in internal units).
+  dt_max:            1e-2  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:   output      # Common part of the name of output files
+  time_first: 0.          # (Optional) Time of the first output if non-cosmological time-integration (in internal units)
+  delta_time: 0.001        # Time difference between consecutive outputs (in internal units)
+
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1e-2     # Time between statistics output
+  time_first:             0.     # (Optional) Time of the first stats output if non-cosmological time-integration (in internal units)
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:               fid.hdf5 # The file to read
+  periodic:                0        # Are we running with periodic ICs?
+  stars_smoothing_length:  0.5
+  
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+  h_min_ratio:           0.1      # Minimal smoothing in units of softening.
+  h_max:                 10.
+
+# Standard EAGLE cooling options
+EAGLECooling:
+  dir_name:                ./coolingtables/  # Location of the Wiersma+08 cooling tables
+  H_reion_z:               11.5              # Redshift of Hydrogen re-ionization
+  He_reion_z_centre:       3.5               # Redshift of the centre of the Helium re-ionization Gaussian
+  He_reion_z_sigma:        0.5               # Spread in redshift of the  Helium re-ionization Gaussian
+  He_reion_eV_p_H:         2.0               # Energy injected by Helium re-ionization in electron-volt per Hydrogen atom
+
+# Primordial abundances
+EAGLEChemistry:
+  init_abundance_metal:     0.0129          # Initial fraction of particle mass in *all* metals
+  init_abundance_Hydrogen:  0.7065       # Initial fraction of particle mass in Hydrogen
+  init_abundance_Helium:    0.2806        # Initial fraction of particle mass in Helium
+  init_abundance_Carbon:    0.00207        # Initial fraction of particle mass in Carbon
+  init_abundance_Nitrogen:  0.000836        # Initial fraction of particle mass in Nitrogen
+  init_abundance_Oxygen:    0.00549        # Initial fraction of particle mass in Oxygen
+  init_abundance_Neon:      0.00141        # Initial fraction of particle mass in Neon
+  init_abundance_Magnesium: 0.000591        # Initial fraction of particle mass in Magnesium
+  init_abundance_Silicon:   0.000683        # Initial fraction of particle mass in Silicon
+  init_abundance_Iron:      0.0011        # Initial fraction of particle mass in Iron
+
+# Hernquist potential parameters
+HernquistPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
+  position:        [0.,0.,0.]    # Location of centre of isothermal potential with respect to centre of the box (if 0) otherwise absolute (if 1) (internal units)
+  idealizeddisk:   1        # Run with an idealized galaxy disk
+  M200:            137.0   # M200 of the galaxy disk
+  h:               0.704    # reduced Hubble constant (value does not specify the used units!)
+  concentration:   9.0      # concentration of the Halo
+  diskfraction:              0.040   # Disk mass fraction
+  bulgefraction:              0.014   # Bulge mass fraction
+  timestep_mult:   0.01     # Dimensionless pre-factor for the time-step condition, basically determines the fraction of the orbital time we use to do the time integration
+  epsilon:         0.01      # Softening size (internal units)
+ 
+# EAGLE star formation parameters
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature of the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.3       # The gas fraction used internally by the model.
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicutt-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicutt-Schmidt law above the high-density threshold.
+  KS_temperature_margin_dex:         0.5       # Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars.
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/plotSolution.py b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/plotSolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..89a87923148cb6872ab17b6d7229aef597ef3287
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/plotSolution.py
@@ -0,0 +1,368 @@
+import matplotlib
+
+matplotlib.use("Agg")
+from pylab import *
+from scipy import stats
+import h5py as h5
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 9,
+    "legend.fontsize": 9,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (3.15, 3.15),
+    "figure.subplot.left": 0.15,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.13,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.15,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 2.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+snap = int(sys.argv[1])
+filename = "output_%.4d.hdf5"%snap
+
+f = h5.File(filename, "r")
+
+# Physical constants
+k_in_cgs = 1.38064852e-16
+mH_in_cgs = 1.6737236e-24
+year_in_cgs = 3600.0 * 24 * 365.0
+Msun_in_cgs = 1.98848e33
+G_in_cgs = 6.67259e-8
+pc_in_cgs = 3.08567758e18
+Msun_p_pc2 = Msun_in_cgs / pc_in_cgs**2
+
+# Geometry info
+boxsize = f["/Header"].attrs["BoxSize"]
+centre = boxsize / 2.0
+
+# Read units
+unit_length_in_cgs = f["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = f["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = f["/Units"].attrs["Unit time in cgs (U_t)"]
+
+# Calculate Gravitational constant in internal units
+G = G_in_cgs * ( unit_length_in_cgs**3 / unit_mass_in_cgs / unit_time_in_cgs**2)**(-1)
+
+# Read parameters of the SF model
+KS_law_slope = float(f["/Parameters"].attrs["EAGLEStarFormation:KS_exponent"])
+KS_law_norm = float(f["/Parameters"].attrs["EAGLEStarFormation:KS_normalisation"])
+KS_thresh_Z0 = float(f["/Parameters"].attrs["EAGLEStarFormation:threshold_Z0"])
+KS_thresh_slope = float(f["/Parameters"].attrs["EAGLEStarFormation:threshold_slope"])
+KS_thresh_norm = float(f["/Parameters"].attrs["EAGLEStarFormation:threshold_norm_H_p_cm3"])
+KS_gas_fraction = float(f["/Parameters"].attrs["EAGLEStarFormation:gas_fraction"])
+KS_thresh_max_norm = float(f["/Parameters"].attrs["EAGLEStarFormation:threshold_max_density_H_p_cm3"])
+KS_high_den_thresh = float(f["/Parameters"].attrs["EAGLEStarFormation:KS_high_density_threshold_H_p_cm3"])
+KS_law_slope_high_den = float(f["/Parameters"].attrs["EAGLEStarFormation:KS_high_density_exponent"])
+EOS_gamma_effective = float(f["/Parameters"].attrs["EAGLEStarFormation:EOS_gamma_effective"])                           
+EOS_density_norm = float(f["/Parameters"].attrs["EAGLEStarFormation:EOS_density_norm_H_p_cm3"])                           
+EOS_temp_norm = float(f["/Parameters"].attrs["EAGLEStarFormation:EOS_temperature_norm_K"])                           
+
+# Read reference metallicity
+EAGLE_Z = float(f["/Parameters"].attrs["EAGLEChemistry:init_abundance_metal"])
+
+# Read parameters of the entropy floor
+EAGLEfloor_Jeans_rho_norm = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Jeans_density_threshold_H_p_cm3"])
+EAGLEfloor_Jeans_temperature_norm_K = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Jeans_temperature_norm_K"])
+EAGLEfloor_Jeans_gamma_effective = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Jeans_gamma_effective"])
+EAGLEfloor_cool_rho_norm = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Cool_density_threshold_H_p_cm3"])
+EAGLEfloor_cool_temperature_norm_K = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Cool_temperature_norm_K"])
+EAGLEfloor_cool_gamma_effective = float(f["/Parameters"].attrs["EAGLEEntropyFloor:Cool_gamma_effective"])
+
+# Properties of the KS law
+KS_law_norm_cgs = KS_law_norm * Msun_in_cgs / ( 1e6 * pc_in_cgs**2 * year_in_cgs )
+gamma = 5./3.
+EOS_press_norm = k_in_cgs * EOS_temp_norm * EOS_density_norm
+
+# Star formation threshold
+SF_thresh = KS_thresh_norm * (EAGLE_Z / KS_thresh_Z0)**(KS_thresh_slope)
+
+# Read gas properties
+gas_pos = f["/PartType0/Coordinates"][:, :]
+gas_mass = f["/PartType0/Masses"][:]
+gas_rho = f["/PartType0/Density"][:]
+gas_T = f["/PartType0/Temperature"][:]
+gas_SFR = f["/PartType0/SFR"][:]
+gas_XH = f["/PartType0/ElementAbundance"][:, 0]
+gas_Z = f["/PartType0/Metallicity"][:]
+gas_hsml = f["/PartType0/SmoothingLength"][:]
+gas_sSFR = gas_SFR / gas_mass
+
+# Read the Star properties
+stars_pos = f["/PartType4/Coordinates"][:, :]
+stars_BirthDensity = f["/PartType4/BirthDensity"][:]
+stars_BirthTime = f["/PartType4/BirthTime"][:]
+stars_XH = f["/PartType4/ElementAbundance"][:,0]
+
+# Centre the box
+gas_pos[:, 0] -= centre[0]
+gas_pos[:, 1] -= centre[1]
+gas_pos[:, 2] -= centre[2]
+
+stars_pos[:,0] -= centre[0]
+stars_pos[:,1] -= centre[1]
+stars_pos[:,2] -= centre[2]
+
+# Turn the mass into better units
+gas_mass *= unit_mass_in_cgs / Msun_in_cgs
+
+# Turn the SFR into better units
+gas_SFR = np.maximum(gas_SFR, np.zeros(np.size(gas_SFR)))
+gas_SFR /= unit_time_in_cgs / year_in_cgs
+gas_SFR *= unit_mass_in_cgs / Msun_in_cgs
+
+# Make it a Hydrogen number density
+gas_nH = gas_rho * unit_mass_in_cgs / unit_length_in_cgs ** 3
+gas_nH /= mH_in_cgs
+gas_nH *= gas_XH
+
+stars_BirthDensity *= unit_mass_in_cgs / unit_length_in_cgs ** 3
+stars_BirthDensity /= mH_in_cgs
+stars_BirthDensity *= stars_XH
+
+# Equations of state
+eos_cool_rho = np.logspace(-5, 5, 1000)
+eos_cool_T = EAGLEfloor_cool_temperature_norm_K * (eos_cool_rho / EAGLEfloor_cool_rho_norm) ** ( EAGLEfloor_cool_gamma_effective - 1.0 )
+eos_Jeans_rho = np.logspace(-1, 5, 1000)
+eos_Jeans_T = EAGLEfloor_Jeans_temperature_norm_K * (eos_Jeans_rho / EAGLEfloor_Jeans_rho_norm) ** (EAGLEfloor_Jeans_gamma_effective - 1.0 ) 
+
+########################################################################3
+
+# Plot the phase space diagram
+figure()
+subplot(111, xscale="log", yscale="log")
+plot(eos_cool_rho, eos_cool_T, "k--", lw=0.6)
+plot(eos_Jeans_rho, eos_Jeans_T, "k--", lw=0.6)
+scatter(gas_nH, gas_T, s=0.2)
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=2)
+xlim(3e-6, 3e3)
+ylim(500.0, 2e5)
+savefig("rhoT.png", dpi=200)
+
+# Plot the phase space diagram for SF gas
+figure()
+subplot(111, xscale="log", yscale="log")
+plot(eos_cool_rho, eos_cool_T, "k--", lw=0.6)
+plot(eos_Jeans_rho, eos_Jeans_T, "k--", lw=0.6)
+plot([SF_thresh, SF_thresh], [1, 1e10], "k:", lw=0.6)
+text(SF_thresh*0.9, 2e4, "$n_{\\rm H, thresh}=%.3f~{\\rm cm^{-3}}$"%SF_thresh, fontsize=8, rotation=90, ha="right", va="bottom")
+scatter(gas_nH[gas_SFR > 0.0], gas_T[gas_SFR > 0.0], s=0.2)
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=2)
+xlim(3e-6, 3e3)
+ylim(500.0, 2e5)
+savefig("rhoT_SF.png", dpi=200)
+
+########################################################################3
+
+# 3D Density vs SFR
+figure()
+subplot(111, xscale="log", yscale="log")
+scatter(gas_nH, gas_SFR, s=0.2)
+plot([1, 100], 2e-5 * np.array([1, 100]) ** 0.266667, "k--", lw=1)
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm SFR}~[{\\rm M_\\odot~\\cdot~yr^{-1}}]$", labelpad=-7)
+xlim(1e-4, 3e3)
+ylim(8e-6, 2.5e-4)
+savefig("rho_SFR.png", dpi=200)
+
+########################################################################3
+
+star_mask = (
+    (stars_pos[:, 0] > -15)
+    & (stars_pos[:, 0] < 15)
+    & (stars_pos[:, 1] > -15)
+    & (stars_pos[:, 1] < 15)
+    & (stars_pos[:, 2] < 1.0)
+    & (stars_pos[:, 2] > -1.0)
+)
+
+stars_BirthDensity = stars_BirthDensity[star_mask] 
+#stars_BirthFlag = stars_BirthFlag[star_mask]
+stars_BirthTime = stars_BirthTime[star_mask]
+
+# Histogram of the birth density
+figure()
+subplot(111, xscale="linear", yscale="linear")
+hist(np.log10(stars_BirthDensity),density=True,bins=20,range=[-2,5])
+xlabel("${\\rm Stellar~birth~density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Probability}$", labelpad=-7)
+savefig("BirthDensity.png", dpi=200)
+
+# Plot of the specific star formation rate in the galaxy
+rhos = 10**np.linspace(-1,np.log10(KS_high_den_thresh),100)
+rhoshigh = 10**np.linspace(np.log10(KS_high_den_thresh),5,100)
+
+P_effective = EOS_press_norm * ( rhos / EOS_density_norm)**(EOS_gamma_effective)
+P_norm_high = EOS_press_norm * (KS_high_den_thresh  / EOS_density_norm)**(EOS_gamma_effective)
+sSFR = KS_law_norm_cgs * (Msun_p_pc2)**(-KS_law_slope) * (gamma/G_in_cgs * KS_gas_fraction *P_effective)**((KS_law_slope-1.)/2.)
+KS_law_norm_high_den_cgs = KS_law_norm_cgs * (Msun_p_pc2)**(-KS_law_slope) * (gamma/G_in_cgs * KS_gas_fraction * P_norm_high)**((KS_law_slope-1.)/2.)
+sSFR_high_den = KS_law_norm_high_den_cgs * ((rhoshigh/KS_high_den_thresh)**EOS_gamma_effective)**((KS_law_slope_high_den-1)/2.)
+
+# density - sSFR plane
+figure()
+subplot(111)
+hist2d(np.log10(gas_nH), np.log10(gas_sSFR), bins=50,range=[[-1.5,5],[-.5,2.5]])
+plot(np.log10(rhos),np.log10(sSFR)+np.log10(year_in_cgs)+9.,'k--',label='sSFR low density EAGLE')
+plot(np.log10(rhoshigh),np.log10(sSFR_high_den)+np.log10(year_in_cgs)+9.,'k--',label='sSFR high density EAGLE')
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=2)
+ylabel("${\\rm sSFR}~[{\\rm Gyr^{-1}}]$", labelpad=0)
+xticks([-1, 0, 1, 2, 3, 4], ["$10^{-1}$", "$10^0$", "$10^1$", "$10^2$", "$10^3$", "$10^4$"])
+yticks([0, 1, 2], ["$10^0$", "$10^1$", "$10^2$"])
+xlim(-1.4, 4.9)
+ylim(-0.5, 2.2)
+savefig("density-sSFR.png", dpi=200)
+
+########################################################################3
+
+# Select gas in a pillow box around the galaxy
+mask = (
+    (gas_pos[:, 0] > -15)
+    & (gas_pos[:, 0] < 15)
+    & (gas_pos[:, 1] > -15)
+    & (gas_pos[:, 1] < 15)
+    & (gas_pos[:, 2] < 1.0)
+    & (gas_pos[:, 2] > -1.0)
+)
+gas_pos = gas_pos[mask, :]
+gas_SFR = gas_SFR[mask]
+gas_nH = gas_nH[mask]
+gas_rho = gas_rho[mask]
+gas_T = gas_T[mask]
+gas_mass = gas_mass[mask]
+gas_Z = gas_Z[mask]
+gas_hsml = gas_hsml[mask]
+
+
+# Make a crude map of the gas
+figure()
+subplot(111)
+scatter(gas_pos[:, 0], gas_pos[:, 1], s=0.1)
+xlabel("${\\rm Pos}~x~[{\\rm kpc}]$", labelpad=0)
+ylabel("${\\rm Pos}~y~[{\\rm kpc}]$", labelpad=-3)
+xlim(-12, 12)
+ylim(-12, 12)
+savefig("face_on.png", dpi=200)
+
+figure()
+subplot(111)
+scatter(gas_pos[:, 0], gas_pos[:, 2], s=0.1)
+xlabel("${\\rm Pos}~x~[{\\rm kpc}]$", labelpad=0)
+ylabel("${\\rm Pos}~z~[{\\rm kpc}]$", labelpad=-3)
+xlim(-12, 12)
+ylim(-12, 12)
+savefig("edge_on.png", dpi=200)
+
+# Now a SF map
+rcParams.update({"figure.figsize": (4.15, 3.15)})
+figure()
+subplot(111)
+scatter(gas_pos[:, 0], gas_pos[:, 1], s=0.1, c=gas_SFR)
+xlabel("${\\rm Pos}~x~[{\\rm kpc}]$", labelpad=0)
+ylabel("${\\rm Pos}~y~[{\\rm kpc}]$", labelpad=-3)
+colorbar()
+xlim(-12, 12)
+ylim(-12, 12)
+savefig("SF_face_on.png", dpi=200)
+
+
+########################################################################3
+
+# Bin the data in kpc-size patches
+
+x_edges = np.linspace(-15, 15, 31)
+y_edges = np.linspace(-15, 15, 31)
+
+map_mass, _, _, _ = stats.binned_statistic_2d(
+    gas_pos[:, 0], gas_pos[:, 1], gas_mass, statistic="sum", bins=(x_edges, y_edges)
+)
+map_SFR, _, _, _ = stats.binned_statistic_2d(
+    gas_pos[:, 0], gas_pos[:, 1], gas_SFR, statistic="sum", bins=(x_edges, y_edges)
+)
+
+# Mass map
+figure()
+subplot(111)
+pcolormesh(x_edges, y_edges, np.log10(map_mass))
+colorbar()
+xlim(-12, 12)
+ylim(-12, 12)
+xlabel("${\\rm Pos}~x~[{\\rm kpc}]$", labelpad=0)
+ylabel("${\\rm Pos}~y~[{\\rm kpc}]$", labelpad=-3)
+savefig("Map_mass.png", dpi=200)
+
+# SF map
+figure()
+subplot(111)
+pcolormesh(x_edges, y_edges, np.log10(map_SFR), vmax=-0.5, vmin=-4.5)
+colorbar()
+xlim(-12, 12)
+ylim(-12, 12)
+xlabel("${\\rm Pos}~x~[{\\rm kpc}]$", labelpad=0)
+ylabel("${\\rm Pos}~y~[{\\rm kpc}]$", labelpad=-3)
+savefig("Map_SFR.png", dpi=200)
+
+#########################################################################
+
+# Give a minimum SF surface density for the plots
+map_SFR[map_SFR < 1e-6] = 1e-6
+
+# Theoretical threshold (assumes all gas has the same Z)
+KS_n_thresh = KS_thresh_norm * (gas_Z[0] / KS_thresh_Z0) ** KS_thresh_slope
+if np.isfinite(KS_n_thresh) == False:
+    KS_n_thresh = KS_thresh_max_norm
+KS_sigma_thresh = 29.0 * np.sqrt(KS_gas_fraction) * np.sqrt(KS_n_thresh)
+
+# Theoretical KS law
+KS_sigma_mass = np.logspace(-1, 3, 100)
+KS_sigma_SFR = KS_law_norm * KS_sigma_mass ** KS_law_slope
+
+# KS relation
+rcParams.update({"figure.figsize": (3.15, 3.15), "figure.subplot.left": 0.18})
+figure()
+subplot(111, xscale="log", yscale="log")
+plot(KS_sigma_mass, KS_sigma_SFR, "k--", lw=0.6)
+plot([KS_sigma_thresh, KS_sigma_thresh], [1e-8, 1e8], "k--", lw=0.6)
+text(
+    KS_sigma_thresh * 0.95,
+    2.2,
+    "$\\Sigma_{\\rm c} = %.2f~{\\rm M_\\odot\\cdot pc^{-2}}$" % KS_sigma_thresh,
+    va="top",
+    ha="right",
+    rotation=90,
+    fontsize=7,
+)
+text(16, 10 ** (-3.5), "$n_{\\rm H,c} = %.3f~{\\rm cm^{-3}}$" % KS_n_thresh, fontsize=7)
+text(
+    16,
+    2e-6,
+    "${\\rm K\\textendash S~law}$:\n$\\Sigma_{\\rm SFR} = A \\times \\Sigma_g^n$\n$n=%.1f$\n$A=%.3f\\times10^{-4}~{\\rm M_\\odot / yr^{1} / kpc^{2}}$\n$f_{\\rm g} = %.1f$\n$\\gamma_{\\rm eos} = %.3f$\n$Z=%1.4f$"
+    % (
+        KS_law_slope,
+        KS_law_norm * 10 ** 4,
+        KS_gas_fraction,
+        EOS_gamma_effective,
+        EAGLE_Z,
+    ),
+    fontsize=7,
+)
+scatter(map_mass.flatten() / 1e6, map_SFR.flatten(), s=0.4)
+xlim(0.3, 900)
+ylim(3e-7, 3)
+xlabel("$\\Sigma_g~[{\\rm M_\\odot\\cdot pc^{-2}}]$", labelpad=0)
+ylabel(
+    "$\\Sigma_{\\rm SFR}~[{\\rm M_\\odot \\cdot yr^{-1} \\cdot kpc^{-2}}]$", labelpad=0
+)
+savefig("KS_law.png", dpi=200)
+close()
diff --git a/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/run.sh b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f2aa93fd0e11cd9b07be991187cf0780a82ebec8
--- /dev/null
+++ b/examples/IsolatedGalaxy/IsolatedGalaxy_starformation/run.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+if [ ! -e fid.hdf5 ] 
+then     
+    echo "Fetching initial conditions for the isolated galaxy example..."
+    ./getIC.sh
+fi
+
+../../swift --threads=32 --external-gravity --self-gravity --stars --star-formation --cooling --temperature --hydro isolated_galaxy.yml 2>&1 | tee output.log
+
+# Kennicutt-Schmidt law plot
+python3 plotSolution.py
+
+# Plot that the random star formation matches the expected SFH
+python3 SFH.py
diff --git a/examples/IsolatedGalaxy/README b/examples/IsolatedGalaxy/README
new file mode 100644
index 0000000000000000000000000000000000000000..ddc340ca1ccc76289805a442fc208811cee897c8
--- /dev/null
+++ b/examples/IsolatedGalaxy/README
@@ -0,0 +1,3 @@
+This directory contains a series of examples using an isolated galaxy
+disk embedded in an external Hernquist dark matter halo. These are
+ideal for testing galaxy formation subgrid model implementation.
diff --git a/examples/Makefile.am b/examples/Makefile.am
index 02664eea177048a67690b9e6af04d10b2e22aa31..1c8415e1302df828133d0459d7dc9e9b4d73ffcb 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -1,4 +1,4 @@
-# tHIS FIle is part of SWIFT.
+# This file is part of SWIFT.
 # Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
 #                    Matthieu Schaller (matthieu.schaller@durham.ac.uk).
 #
@@ -19,16 +19,19 @@
 MYFLAGS = 
 
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
-AM_CFLAGS = -I$(top_srcdir)/src $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS)
+AM_CFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/argparse $(HDF5_CPPFLAGS) \
+	$(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS)
 
 AM_LDFLAGS = $(HDF5_LDFLAGS)
 
 # Extra libraries.
-EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(VELOCIRAPTOR_LIBS) $(GSL_LIBS)
+EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) \
+	$(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) \
+	$(VELOCIRAPTOR_LIBS) $(GSL_LIBS)
 
 # MPI libraries.
-MPI_LIBS = $(METIS_LIBS) $(MPI_THREAD_LIBS)
-MPI_FLAGS = -DWITH_MPI $(METIS_INCS)
+MPI_LIBS = $(PARMETIS_LIBS) $(METIS_LIBS) $(MPI_THREAD_LIBS)
+MPI_FLAGS = -DWITH_MPI $(PARMETIS_INCS) $(METIS_INCS)
 
 # Programs.
 bin_PROGRAMS = swift
@@ -48,70 +51,59 @@ endif
 # Sources for swift
 swift_SOURCES = main.c
 swift_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) -DENGINE_POLICY="engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_LDADD =  ../src/.libs/libswiftsim.a $(EXTRA_LIBS)
+swift_LDADD =  ../src/.libs/libswiftsim.a ../argparse/.libs/libargparse.a $(EXTRA_LIBS)
 
 # Sources for swift_mpi, do we need an affinity policy for MPI?
 swift_mpi_SOURCES = main.c
 swift_mpi_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) $(MPI_FLAGS) -DENGINE_POLICY="engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_mpi_LDADD =  ../src/.libs/libswiftsim_mpi.a $(MPI_LIBS) $(EXTRA_LIBS)
+swift_mpi_LDADD =  ../src/.libs/libswiftsim_mpi.a ../argparse/.libs/libargparse.a $(MPI_LIBS) $(EXTRA_LIBS)
 
 # Scripts to generate ICs
-EXTRA_DIST = CoolingBox/coolingBox.yml CoolingBox/energy_plot.py CoolingBox/makeIC.py CoolingBox/run.sh \
-             ConstantCosmoVolume/run.sh ConstantCosmoVolume/makeIC.py ConstantCosmoVolume/plotSolution.py ConstantCosmoVolume/constant_volume.yml \
-	     EAGLE_6/eagle_6.yml EAGLE_6/getIC.sh EAGLE_6/README EAGLE_6/run.sh \
-	     EAGLE_12/eagle_12.yml EAGLE_12/getIC.sh EAGLE_12/README EAGLE_12/run.sh \
-	     EAGLE_25/eagle_25.yml EAGLE_25/getIC.sh EAGLE_25/README EAGLE_25/run.sh \
-	     EAGLE_50/eagle_50.yml EAGLE_50/getIC.sh EAGLE_50/README EAGLE_50/run.sh \
-	     EAGLE_100/eagle_100.yml EAGLE_100/getIC.sh EAGLE_100/README EAGLE_100/run.sh \
-	     EAGLE_DMO_12/eagle_12.yml EAGLE_DMO_12/getIC.sh EAGLE_DMO_12/README EAGLE_DMO_12/run.sh \
-	     EAGLE_DMO_25/eagle_25.yml EAGLE_DMO_25/getIC.sh EAGLE_DMO_25/README EAGLE_DMO_25/run.sh \
-	     EAGLE_DMO_50/eagle_50.yml EAGLE_DMO_50/getIC.sh EAGLE_DMO_50/README EAGLE_DMO_50/run.sh \
-	     EAGLE_DMO_100/eagle_100.yml EAGLE_DMO_100/getIC.sh EAGLE_DMO_100/README EAGLE_DMO_100/run.sh \
-	     EvrardCollapse_3D/evrard.yml EvrardCollapse_3D/makeIC.py EvrardCollapse_3D/plotSolution.py EvrardCollapse_3D/run.sh EvrardCollapse_3D/getReference.sh \
-	     ExternalPointMass/externalPointMass.yml ExternalPointMass/makeIC.py ExternalPointMass/run.sh ExternalPointMass/energy_plot.py \
-	     GreshoVortex_2D/getGlass.sh GreshoVortex_2D/gresho.yml GreshoVortex_2D/makeIC.py GreshoVortex_2D/plotSolution.py GreshoVortex_2D/run.sh \
-	     GreshoVortex_3D/getGlass.sh GreshoVortex_3D/gresho.yml GreshoVortex_3D/makeIC.py GreshoVortex_3D/plotSolution.py GreshoVortex_3D/run.sh \
-	     HydrostaticHalo/README HydrostaticHalo/hydrostatic.yml HydrostaticHalo/makeIC.py HydrostaticHalo/run.sh \
-	     HydrostaticHalo/density_profile.py HydrostaticHalo/velocity_profile.py HydrostaticHalo/internal_energy_profile.py HydrostaticHalo/test_energy_conservation.py \
-	     InteractingBlastWaves_1D/run.sh InteractingBlastWaves_1D/makeIC.py InteractingBlastWaves_1D/plotSolution.py InteractingBlastWaves_1D/interactingBlastWaves.yml InteractingBlastWaves_1D/getReference.sh \
-	     IsothermalPotential/README IsothermalPotential/run.sh IsothermalPotential/energy_plot.py IsothermalPotential/isothermal.yml IsothermalPotential/makeIC.py \
-	     KelvinHelmholtz_2D/kelvinHelmholtz.yml KelvinHelmholtz_2D/makeIC.py KelvinHelmholtz_2D/plotSolution.py KelvinHelmholtz_2D/run.sh \
-	     MultiTypes/makeIC.py  MultiTypes/multiTypes.yml MultiTypes/run.sh \
-             Noh_1D/makeIC.py Noh_1D/noh.yml Noh_1D/plotSolution.py Noh_1D/run.sh \
-             Noh_2D/makeIC.py Noh_2D/noh.yml Noh_2D/plotSolution.py Noh_2D/run.sh Noh_2D/getGlass.sh \
-             Noh_3D/makeIC.py Noh_3D/noh.yml Noh_3D/plotSolution.py Noh_3D/run.sh Noh_3D/getGlass.sh \
-	     PerturbedBox_2D/makeIC.py PerturbedBox_2D/perturbedPlane.yml \
-	     PerturbedBox_3D/makeIC.py PerturbedBox_3D/perturbedBox.yml PerturbedBox_3D/run.sh \
-	     SedovBlast_1D/makeIC.py SedovBlast_1D/plotSolution.py SedovBlast_1D/run.sh SedovBlast_1D/sedov.yml \
-	     SedovBlast_2D/getGlass.sh SedovBlast_2D/makeIC.py SedovBlast_2D/plotSolution.py SedovBlast_2D/run.sh SedovBlast_2D/sedov.yml \
-	     SedovBlast_3D/getGlass.sh SedovBlast_3D/makeIC.py SedovBlast_3D/plotSolution.py SedovBlast_3D/run.sh SedovBlast_3D/sedov.yml \
-             SineWavePotential_1D/makeIC.py SineWavePotential_1D/plotSolution.py SineWavePotential_1D/run.sh SineWavePotential_1D/sineWavePotential.yml \
-             SineWavePotential_2D/makeIC.py SineWavePotential_2D/plotSolution.py SineWavePotential_2D/run.sh SineWavePotential_2D/sineWavePotential.yml \
-             SineWavePotential_3D/makeIC.py SineWavePotential_3D/plotSolution.py SineWavePotential_3D/run.sh SineWavePotential_3D/sineWavePotential.yml \
-             SmallCosmoVolume/README SmallCosmoVolume/getIC.sh SmallCosmoVolume/run.sh SmallCosmoVolume/small_cosmo_volume.yml \
-	     SodShock_1D/makeIC.py SodShock_1D/plotSolution.py SodShock_1D/run.sh SodShock_1D/sodShock.yml \
-	     SodShock_2D/getGlass.sh SodShock_2D/makeIC.py SodShock_2D/plotSolution.py SodShock_2D/run.sh SodShock_2D/sodShock.yml \
-	     SodShock_3D/getGlass.sh SodShock_3D/makeIC.py SodShock_3D/plotSolution.py SodShock_3D/run.sh SodShock_3D/sodShock.yml \
-	     SquareTest_2D/makeIC.py SquareTest_2D/plotSolution.py SquareTest_2D/run.sh SquareTest_2D/square.yml \
-	     UniformBox_2D/makeIC.py UniformBox_2D/run.sh UniformBox_2D/uniformPlane.yml \
-	     UniformBox_3D/makeICbig.py UniformBox_3D/makeIC.py UniformBox_3D/run.sh UniformBox_3D/uniformBox.yml \
-	     Gravity_glass/makeIC.py Gravity_glass/README Gravity_glass/uniform_DM_box.yml \
-             ZeldovichPancake_3D/makeIC.py ZeldovichPancake_3D/zeldovichPancake.yml ZeldovichPancake_3D/run.sh ZeldovichPancake_3D/plotSolution.py
+EXTRA_DIST = Cooling/CoolingBox/coolingBox.yml Cooling/CoolingBox/plotEnergy.py Cooling/CoolingBox/makeIC.py Cooling/CoolingBox/run.sh Cooling/CoolingBox/getGlass.sh \
+             Cosmology/ConstantCosmoVolume/run.sh Cosmology/ConstantCosmoVolume/makeIC.py Cosmology/ConstantCosmoVolume/plotSolution.py Cosmology/ConstantCosmoVolume/constant_volume.yml \
+             Cosmology/ZeldovichPancake_3D/makeIC.py Cosmology/ZeldovichPancake_3D/zeldovichPancake.yml Cosmology/ZeldovichPancake_3D/run.sh Cosmology/ZeldovichPancake_3D/plotSolution.py \
+             EAGLE_low_z/EAGLE_6/eagle_6.yml EAGLE_low_z/EAGLE_6/getIC.sh EAGLE_low_z/EAGLE_6/README EAGLE_low_z/EAGLE_6/run.sh \
+	     EAGLE_low_z/EAGLE_12/eagle_12.yml EAGLE_low_z/EAGLE_12/getIC.sh EAGLE_low_z/EAGLE_12/README EAGLE_low_z/EAGLE_12/run.sh \
+	     EAGLE_low_z/EAGLE_25/eagle_25.yml EAGLE_low_z/EAGLE_25/getIC.sh EAGLE_low_z/EAGLE_25/README EAGLE_low_z/EAGLE_25/run.sh \
+	     EAGLE_low_z/EAGLE_50/eagle_50.yml EAGLE_low_z/EAGLE_50/getIC.sh EAGLE_low_z/EAGLE_50/README EAGLE_low_z/EAGLE_50/run.sh \
+	     EAGLE_low_z/EAGLE_100/eagle_100.yml EAGLE_low_z/EAGLE_100/getIC.sh EAGLE_low_z/EAGLE_100/README EAGLE_low_z/EAGLE_100/run.sh \
+	     EAGLE_DMO_low_z/EAGLE_DMO_12/eagle_12.yml EAGLE_DMO_low_z/EAGLE_DMO_12/getIC.sh EAGLE_DMO_low_z/EAGLE_DMO_12/README EAGLE_DMO_low_z/EAGLE_DMO_12/run.sh \
+	     EAGLE_DMO_low_z/EAGLE_DMO_25/eagle_25.yml EAGLE_DMO_low_z/EAGLE_DMO_25/getIC.sh EAGLE_DMO_low_z/EAGLE_DMO_25/README EAGLE_DMO_low_z/EAGLE_DMO_25/run.sh \
+	     EAGLE_DMO_low_z/EAGLE_DMO_50/eagle_50.yml EAGLE_DMO_low_z/EAGLE_DMO_50/getIC.sh EAGLE_DMO_low_z/EAGLE_DMO_50/README EAGLE_DMO_low_z/EAGLE_DMO_50/run.sh \
+	     EAGLE_DMO_low_z/EAGLE_DMO_100/eagle_100.yml EAGLE_DMO_low_z/EAGLE_DMO_100/getIC.sh EAGLE_DMO_low_z/EAGLE_DMO_100/README EAGLE_DMO_low_z/EAGLE_DMO_100/run.sh \
+	     GravityTests/ExternalPointMass/externalPointMass.yml GravityTests/ExternalPointMass/makeIC.py GravityTests/ExternalPointMass/run.sh GravityTests/ExternalPointMass/energy_plot.py \
+	     GravityTests/HydrostaticHalo/README GravityTests/HydrostaticHalo/hydrostatic.yml GravityTests/HydrostaticHalo/makeIC.py GravityTests/HydrostaticHalo/run.sh \
+	     GravityTests/HydrostaticHalo/density_profile.py GravityTests/HydrostaticHalo/velocity_profile.py GravityTests/HydrostaticHalo/internal_energy_profile.py GravityTests/HydrostaticHalo/test_energy_conservation.py \
+             GravityTests/IsothermalPotential/README GravityTests/IsothermalPotential/run.sh GravityTests/IsothermalPotential/energy_plot.py GravityTests/IsothermalPotential/isothermal.yml GravityTests/IsothermalPotential/makeIC.py \
+	     HydroTests/GreshoVortex_2D/getGlass.sh HydroTests/GreshoVortex_2D/gresho.yml HydroTests/GreshoVortex_2D/makeIC.py HydroTests/GreshoVortex_2D/plotSolution.py HydroTests/GreshoVortex_2D/run.sh \
+	     HydroTests/GreshoVortex_3D/getGlass.sh HydroTests/GreshoVortex_3D/gresho.yml HydroTests/GreshoVortex_3D/makeIC.py HydroTests/GreshoVortex_3D/plotSolution.py HydroTests/GreshoVortex_3D/run.sh \
+	     HydroTests/EvrardCollapse_3D/evrard.yml HydroTests/EvrardCollapse_3D/makeIC.py HydroTests/EvrardCollapse_3D/plotSolution.py HydroTests/EvrardCollapse_3D/run.sh HydroTests/EvrardCollapse_3D/getReference.sh \
+	     HydroTests/InteractingBlastWaves_1D/run.sh HydroTests/InteractingBlastWaves_1D/makeIC.py HydroTests/InteractingBlastWaves_1D/plotSolution.py HydroTests/InteractingBlastWaves_1D/interactingBlastWaves.yml HydroTests/InteractingBlastWaves_1D/getReference.sh \
+	     HydroTests/KelvinHelmholtz_2D/kelvinHelmholtz.yml HydroTests/KelvinHelmholtz_2D/makeIC.py HydroTests/KelvinHelmholtz_2D/plotSolution.py HydroTests/KelvinHelmholtz_2D/run.sh \
+             HydroTests/Noh_1D/makeIC.py HydroTests/Noh_1D/noh.yml HydroTests/Noh_1D/plotSolution.py HydroTests/Noh_1D/run.sh \
+             HydroTests/Noh_2D/makeIC.py HydroTests/Noh_2D/noh.yml HydroTests/Noh_2D/plotSolution.py HydroTests/Noh_2D/run.sh HydroTests/Noh_2D/getGlass.sh \
+             HydroTests/Noh_3D/makeIC.py HydroTests/Noh_3D/noh.yml HydroTests/Noh_3D/plotSolution.py HydroTests/Noh_3D/run.sh HydroTests/Noh_3D/getGlass.sh \
+	     HydroTests/PerturbedBox_2D/makeIC.py HydroTests/PerturbedBox_2D/perturbedPlane.yml \
+	     HydroTests/PerturbedBox_3D/makeIC.py HydroTests/PerturbedBox_3D/perturbedBox.yml HydroTests/PerturbedBox_3D/run.sh \
+	     HydroTests/SedovBlast_1D/makeIC.py HydroTests/SedovBlast_1D/plotSolution.py HydroTests/SedovBlast_1D/run.sh HydroTests/SedovBlast_1D/sedov.yml \
+	     HydroTests/SedovBlast_2D/getGlass.sh HydroTests/SedovBlast_2D/makeIC.py HydroTests/SedovBlast_2D/plotSolution.py HydroTests/SedovBlast_2D/run.sh HydroTests/SedovBlast_2D/sedov.yml \
+	     HydroTests/SedovBlast_3D/getGlass.sh HydroTests/SedovBlast_3D/makeIC.py HydroTests/SedovBlast_3D/plotSolution.py HydroTests/SedovBlast_3D/run.sh HydroTests/SedovBlast_3D/sedov.yml \
+             HydroTests/SineWavePotential_1D/makeIC.py HydroTests/SineWavePotential_1D/plotSolution.py HydroTests/SineWavePotential_1D/run.sh HydroTests/SineWavePotential_1D/sineWavePotential.yml \
+             HydroTests/SineWavePotential_2D/makeIC.py HydroTests/SineWavePotential_2D/plotSolution.py HydroTests/SineWavePotential_2D/run.sh HydroTests/SineWavePotential_2D/sineWavePotential.yml \
+             HydroTests/SineWavePotential_3D/makeIC.py HydroTests/SineWavePotential_3D/plotSolution.py HydroTests/SineWavePotential_3D/run.sh HydroTests/SineWavePotential_3D/sineWavePotential.yml \
+	     HydroTests/SodShock_1D/makeIC.py HydroTests/SodShock_1D/plotSolution.py HydroTests/SodShock_1D/run.sh HydroTests/SodShock_1D/sodShock.yml \
+	     HydroTests/SodShock_2D/getGlass.sh HydroTests/SodShock_2D/makeIC.py HydroTests/SodShock_2D/plotSolution.py HydroTests/SodShock_2D/run.sh HydroTests/SodShock_2D/sodShock.yml \
+	     HydroTests/SodShock_3D/getGlass.sh HydroTests/SodShock_3D/makeIC.py HydroTests/SodShock_3D/plotSolution.py HydroTests/SodShock_3D/run.sh HydroTests/SodShock_3D/sodShock.yml \
+	     HydroTests/SquareTest_2D/makeIC.py HydroTests/SquareTest_2D/plotSolution.py HydroTests/SquareTest_2D/run.sh HydroTests/SquareTest_2D/square.yml \
+	     HydroTests/UniformBox_2D/makeIC.py HydroTests/UniformBox_2D/run.sh HydroTests/UniformBox_2D/uniformPlane.yml \
+	     HydroTests/UniformBox_3D/makeICbig.py HydroTests/UniformBox_3D/makeIC.py HydroTests/UniformBox_3D/run.sh HydroTests/UniformBox_3D/uniformBox.yml \
+             SmallCosmoVolume/SmallCosmoVolume_hydro/README SmallCosmoVolume/SmallCosmoVolume_hydro/getIC.sh SmallCosmoVolume/SmallCosmoVolume_hydro/run.sh SmallCosmoVolume/SmallCosmoVolume_hydro/small_cosmo_volume.yml SmallCosmoVolume/SmallCosmoVolume_hydro/plotTempEvolution.py \
+             SmallCosmoVolume/SmallCosmoVolume_DM/README SmallCosmoVolume/SmallCosmoVolume_DM/getIC.sh SmallCosmoVolume/SmallCosmoVolume_DM/run.sh SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml SmallCosmoVolume/SmallCosmoVolume_DM/stf_input_6dfof_dmonly_sub.cfg \
+	     PMillennium/PMillennium-384/p-mill-384.yml \
+             PMillennium/PMillennium-768/p-mill-768.yml \
+             PMillennium/PMillennium-1536/p-mill-1536.yml \
+	     SantaBarbara/SantaBarbara-256/README SantaBarbara/SantaBarbara-256/getIC.sh SantaBarbara/SantaBarbara-256/santa_barbara.yml SantaBarbara/SantaBarbara-256/run.sh \
+	     SantaBarbara/SantaBarbara-128/README SantaBarbara/SantaBarbara-128/getIC.sh SantaBarbara/SantaBarbara-128/santa_barbara.yml SantaBarbara/SantaBarbara-128/run.sh
 
 # Default parameter file
 EXTRA_DIST += parameter_example.yml
-
-# Scripts to plot task graphs
-EXTRA_DIST += plot_tasks.py analyse_tasks.py process_plot_tasks_MPI process_plot_tasks
-
-# Scripts to plot threadpool 'task' graphs
-EXTRA_DIST += analyse_threadpool_tasks.py \
-              plot_threadpool.py \
-              process_plot_threadpool
-
-# Script for scaling plot
-EXTRA_DIST += plot_scaling_results.py \
-              plot_scaling_results_breakdown.py
-
-# Script for gravity accuracy
-EXTRA_DIST += plot_gravity_checks.py
diff --git a/examples/MultiTypes/makeIC.py b/examples/MultiTypes/makeIC.py
deleted file mode 100644
index 41a5ef5f2ffc4073ef8a4e93a130b43fcbe2c1f5..0000000000000000000000000000000000000000
--- a/examples/MultiTypes/makeIC.py
+++ /dev/null
@@ -1,189 +0,0 @@
-###############################################################################
- # This file is part of SWIFT.
- # Copyright (c) 2013 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
- #                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- # 
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU Lesser General Public License as published
- # by the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
- # 
- # This program is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- # GNU General Public License for more details.
- # 
- # You should have received a copy of the GNU Lesser General Public License
- # along with this program.  If not, see <http://www.gnu.org/licenses/>.
- # 
- ##############################################################################
-
-import h5py
-import sys
-from numpy import *
-
-# Generates a swift IC file containing a cartesian distribution of DM particles
-# with a density of 1
-
-# Parameters
-periodic= 1           # 1 For periodic box
-boxSize = 1.
-Lgas = int(sys.argv[1])  # Number of particles along one axis
-rhoGas = 2.              # Density
-P = 1.                   # Pressure
-gamma = 5./3.            # Gas adiabatic index
-eta = 1.2349             # 48 ngbs with cubic spline kernel
-rhoDM = 1.
-Ldm = int(sys.argv[2])  # Number of particles along one axis
-
-massStars = 0.1
-Lstars = int(sys.argv[3])  # Number of particles along one axis
-
-fileBaseName = "multiTypes"
-num_files = int(sys.argv[4])
-
-#---------------------------------------------------
-numGas_tot = Lgas**3
-massGas = boxSize**3 * rhoGas / numGas_tot
-internalEnergy = P / ((gamma - 1.)*rhoGas)
-
-numDM_tot = Ldm**3
-massDM = boxSize**3 * rhoDM / numDM_tot
-
-numStars_tot = Lstars**3
-massStars = massDM * massStars
-
-
-#--------------------------------------------------
-
-offsetGas = 0
-offsetDM = 0
-offsetStars = 0
-
-for n in range(num_files):
-
-    # File name
-    if num_files == 1:
-        fileName = fileBaseName + ".hdf5"
-    else:
-        fileName = fileBaseName + ".%d.hdf5"%n
-        
-    # File
-    file = h5py.File(fileName, 'w')
-
-    # Number of particles
-    numGas = numGas_tot / num_files
-    numDM = numDM_tot / num_files
-    numStars = numStars_tot / num_files
-
-    if n == num_files - 1:
-        numGas += numGas_tot % num_files
-        numDM += numDM_tot % num_files
-        numStars += numStars_tot % num_files
-
-    
-    # Header
-    grp = file.create_group("/Header")
-    grp.attrs["BoxSize"] = boxSize
-    grp.attrs["NumPart_Total"] =  [numGas_tot, numDM_tot, 0, 0, numStars_tot, 0]
-    grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
-    grp.attrs["NumPart_ThisFile"] = [numGas, numDM, 0, 0, numStars, 0]
-    grp.attrs["Time"] = 0.0
-    grp.attrs["NumFilesPerSnapshot"] = num_files
-    grp.attrs["MassTable"] = [0.0, massDM, 0.0, 0.0, 0.0, 0.0]
-    grp.attrs["Flag_Entropy_ICs"] = 0
-    grp.attrs["Dimension"] = 3
-
-    #Runtime parameters
-    grp = file.create_group("/RuntimePars")
-    grp.attrs["PeriodicBoundariesOn"] = periodic
-    
-    #Units
-    grp = file.create_group("/Units")
-    grp.attrs["Unit length in cgs (U_L)"] = 1.
-    grp.attrs["Unit mass in cgs (U_M)"] = 1.
-    grp.attrs["Unit time in cgs (U_t)"] = 1.
-    grp.attrs["Unit current in cgs (U_I)"] = 1.
-    grp.attrs["Unit temperature in cgs (U_T)"] = 1.
-
-
-    # Gas Particle group
-    grp = file.create_group("/PartType0")
-
-    v  = zeros((numGas, 3))
-    ds = grp.create_dataset('Velocities', (numGas, 3), 'f', data=v)
-    
-    m = full((numGas, 1), massGas)
-    ds = grp.create_dataset('Masses', (numGas,1), 'f', data=m)
-    
-    h = full((numGas, 1), eta * boxSize / Lgas)
-    ds = grp.create_dataset('SmoothingLength', (numGas,1), 'f', data=h)
-    
-    u = full((numGas, 1), internalEnergy)
-    ds = grp.create_dataset('InternalEnergy', (numGas,1), 'f', data=u)
-
-    ids = linspace(offsetGas, offsetGas+numGas, numGas, endpoint=False).reshape((numGas,1))
-    ds = grp.create_dataset('ParticleIDs', (numGas, 1), 'L', data=ids+1)
-    x      = ids % Lgas;
-    y      = ((ids - x) / Lgas) % Lgas;
-    z      = (ids - x - Lgas * y) / Lgas**2;
-    coords = zeros((numGas, 3))
-    coords[:,0] = z[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
-    coords[:,1] = y[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
-    coords[:,2] = x[:,0] * boxSize / Lgas + boxSize / (2*Lgas)
-    ds = grp.create_dataset('Coordinates', (numGas, 3), 'd', data=coords)
-
-
-    
-    # DM Particle group
-    grp = file.create_group("/PartType1")
-
-    v  = zeros((numDM, 3))
-    ds = grp.create_dataset('Velocities', (numDM, 3), 'f', data=v)
-
-    m = full((numDM, 1), massDM)
-    ds = grp.create_dataset('Masses', (numDM,1), 'f', data=m)
-
-    ids = linspace(offsetDM, offsetDM+numDM, numDM, endpoint=False).reshape((numDM,1))
-    ds = grp.create_dataset('ParticleIDs', (numDM, 1), 'L', data=ids + numGas_tot + 1)
-    ds[()] = ids + Lgas**3 + 1
-    x      = ids % Ldm;
-    y      = ((ids - x) / Ldm) % Ldm;
-    z      = (ids - x - Ldm * y) / Ldm**2;
-    coords = zeros((numDM, 3))
-    coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    ds = grp.create_dataset('Coordinates', (numDM, 3), 'd', data=coords)
-
-
-
-    # Star Particle group
-    grp = file.create_group("/PartType4")
-
-    v  = zeros((numStars, 3))
-    ds = grp.create_dataset('Velocities', (numStars, 3), 'f', data=v)
-
-    m = full((numStars, 1), massStars)
-    ds = grp.create_dataset('Masses', (numStars,1), 'f', data=m)
-
-    ids = linspace(0, numStars, numStars, endpoint=False).reshape((numStars,1))
-    ds = grp.create_dataset('ParticleIDs', (numStars, 1), 'L', data=ids + numGas_tot + numDM_tot + 1)
-    x      = ids % Ldm;
-    y      = ((ids - x) / Ldm) % Ldm;
-    z      = (ids - x - Ldm * y) / Ldm**2;
-    coords = zeros((numStars, 3))
-    coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm)
-    ds = grp.create_dataset('Coordinates', (numStars, 3), 'd', data=coords)
-
-
-    
-    # Shift stuff
-    offsetGas += numGas
-    offsetDM += numDM
-    offsetStars += numStars
-    
-    file.close()
-
diff --git a/examples/MultiTypes/multiTypes.yml b/examples/MultiTypes/multiTypes.yml
deleted file mode 100644
index 04647f0f00e69f5baf2560aca0feeb14a26cc50a..0000000000000000000000000000000000000000
--- a/examples/MultiTypes/multiTypes.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-# Define the system of units to use internally. 
-InternalUnitSystem:
-  UnitMass_in_cgs:     1   # Grams
-  UnitLength_in_cgs:   1   # Centimeters
-  UnitVelocity_in_cgs: 1   # Centimeters per second
-  UnitCurrent_in_cgs:  1   # Amperes
-  UnitTemp_in_cgs:     1   # Kelvin
-
-# Parameters governing the time integration
-TimeIntegration:
-  time_begin: 0.    # The starting time of the simulation (in internal units).
-  time_end:   1.    # The end time of the simulation (in internal units).
-  dt_min:     1e-6  # The minimal time-step size of the simulation (in internal units).
-  dt_max:     1e-2  # The maximal time-step size of the simulation (in internal units).
-
-# Parameters governing the snapshots
-Snapshots:
-  basename:            multiTypes # Common part of the name of output files
-  time_first:          0.         # Time of the first output (in internal units)
-  delta_time:          0.01       # Time difference between consecutive outputs (in internal units)
-
-# Parameters governing the conserved quantities statistics
-Statistics:
-  delta_time:          1e-2 # Time between statistics output
-
-# Parameters for the hydrodynamics scheme
-SPH:
-  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
-  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
-
-# Parameters related to the initial conditions
-InitialConditions:
-  file_name:  ./multiTypes.hdf5     # The file to read
-  replicate:  2                     # Replicate all particles twice along each axis
-
-# External potential parameters
-PointMassPotential:
-  position:        [50.,50.,50.] # location of external point mass in internal units
-  mass:            1e10     # mass of external point mass in internal units
-  timestep_mult:   1e-2
diff --git a/examples/MultiTypes/run.sh b/examples/MultiTypes/run.sh
deleted file mode 100755
index 38cba70393861539f18bf9fa360d51f46dd3367d..0000000000000000000000000000000000000000
--- a/examples/MultiTypes/run.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# Generate the initial conditions if they are not present.
-if [ ! -e multiTypes.hdf5 ]
-then
-    echo "Generating initial conditions for the multitype box example..."
-    python makeIC.py 9 13 7 1
-fi
-
-../swift -s -g -S -t 1 multiTypes.yml 2>&1 | tee output.log
diff --git a/examples/PMillennium/PMillennium-1536/getIC.sh b/examples/PMillennium/PMillennium-1536/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0ba78a0fd11025b12a7a9a7be9de47c2b5ad2898
--- /dev/null
+++ b/examples/PMillennium/PMillennium-1536/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/PMillennium/PMill-1536.hdf5
diff --git a/examples/PMillennium/PMillennium-1536/p-mill-1536.yml b/examples/PMillennium/PMillennium-1536/p-mill-1536.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ea44572f90eb69330c973946cb5533a4c58b2c82
--- /dev/null
+++ b/examples/PMillennium/PMillennium-1536/p-mill-1536.yml
@@ -0,0 +1,51 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Planck-13 cosmology
+Cosmology:
+  h:              0.6777
+  a_begin:        0.02     # z_ini = 49
+  a_end:          1.0      # z_end = 0
+  Omega_m:        0.307
+  Omega_lambda:   0.693
+  Omega_b:        0.0455
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+Scheduler:
+  max_top_level_cells: 32
+  cell_split_size:     100
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            PMill
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025         
+  theta:                  0.5          
+  comoving_softening:     0.0208333  # 20.8333 kpc = 1/25 mean inter-particle separation
+  max_physical_softening: 0.0208333  # 20.8333 kpc = 1/25 mean inter-particle separation
+  mesh_side_length:       256
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:                   PMill-1536.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
diff --git a/examples/PMillennium/PMillennium-384/getIC.sh b/examples/PMillennium/PMillennium-384/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..eef95aac218a55e3e432fd1afb59ee7823c930fd
--- /dev/null
+++ b/examples/PMillennium/PMillennium-384/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/PMillennium/PMill-384.hdf5
diff --git a/examples/PMillennium/PMillennium-384/p-mill-384.yml b/examples/PMillennium/PMillennium-384/p-mill-384.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0040f580c8e67182949875d7d85cee2f75851654
--- /dev/null
+++ b/examples/PMillennium/PMillennium-384/p-mill-384.yml
@@ -0,0 +1,51 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Planck-13 cosmology
+Cosmology:
+  h:              0.6777
+  a_begin:        0.02     # z_ini = 49
+  a_end:          1.0      # z_end = 0
+  Omega_m:        0.307
+  Omega_lambda:   0.693
+  Omega_b:        0.0455
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+Scheduler:
+  max_top_level_cells: 16
+  cell_split_size:     100
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            PMill
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025         
+  theta:                  0.5          
+  comoving_softening:     0.08333  # 83.333 kpc = 1/25 mean inter-particle separation
+  max_physical_softening: 0.08333  # 83.333 kpc = 1/25 mean inter-particle separation
+  mesh_side_length:       64
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:                   PMill-384.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
diff --git a/examples/PMillennium/PMillennium-768/getIC.sh b/examples/PMillennium/PMillennium-768/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6c020a5bb4269d4fd4284c617cfd94ab19e87326
--- /dev/null
+++ b/examples/PMillennium/PMillennium-768/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/PMillennium/PMill-768.hdf5
diff --git a/examples/PMillennium/PMillennium-768/p-mill-768.yml b/examples/PMillennium/PMillennium-768/p-mill-768.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5ba97af72513dca5fdccd216f0ede78d0e279b0a
--- /dev/null
+++ b/examples/PMillennium/PMillennium-768/p-mill-768.yml
@@ -0,0 +1,51 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Planck-13 cosmology
+Cosmology:
+  h:              0.6777
+  a_begin:        0.02     # z_ini = 49
+  a_end:          1.0      # z_end = 0
+  Omega_m:        0.307
+  Omega_lambda:   0.693
+  Omega_b:        0.0455
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+Scheduler:
+  max_top_level_cells: 16
+  cell_split_size:     100
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            PMill
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025         
+  theta:                  0.5          
+  comoving_softening:     0.041666  # 41.6666 kpc = 1/25 mean inter-particle separation
+  max_physical_softening: 0.041666  # 41.6666 kpc = 1/25 mean inter-particle separation
+  mesh_side_length:       128
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:                   PMill-768.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
diff --git a/examples/PMillennium/README b/examples/PMillennium/README
new file mode 100644
index 0000000000000000000000000000000000000000..e4755bc214d438a282521b77a34ad853d0f36871
--- /dev/null
+++ b/examples/PMillennium/README
@@ -0,0 +1,9 @@
+Initial conditions for the P-Millennium simulation (Baugh et al. 2018)
+at various resolutions. These are all DMONLY runs in an 800^3 Mpc^3
+volume with the cosmology given by Planck-13:
+
+ Om = 0.307, Ol = 0.693, h = 0.6777, Ob = 0.0455
+
+The ICs exist at different resolutions. The Millennium simulation
+(Springel 2005) has a resolution in between the 1536^3 and 3072^3
+examples given here.
diff --git a/examples/SantaBarbara/SantaBarbara-128/README b/examples/SantaBarbara/SantaBarbara-128/README
new file mode 100644
index 0000000000000000000000000000000000000000..f86f1a4a4e1d16c3f4011c9e3ed8f35f643bd47e
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-128/README
@@ -0,0 +1,21 @@
+Initial conditions for the Santa-Barbara cluster comparison project.
+These have been regenerated from the original Frenk et al. 1999 paper.
+
+The cosmology is Omega_m = 1, Omega_b = 0.1, h = 0.5 and sigma_8 = 0.9.
+
+The ICs are 128^3 particles in a 64^3 Mpc^3 volume. This is about 10x
+higher resolution than in the original paper. The ICs have been
+created for Gadget and the positions and box size are hence expressed
+in h-full units (e.g. box size of 32 / h Mpc). Similarly, the peculiar
+velocities contain an extra sqrt(a) factor. 
+
+We will use SWIFT to cancel the h- and a-factors from the ICs. Gas
+particles will be generated at startup.
+
+MD5 check-sum of the ICs:
+1a1600b41002789b6057b1fa6333f3f0  SantaBarbara_128.hdf5
+
+You can use the script run_velociraptor.sh to also run a basic 3D FoF
+with VELOCIraptor on your output data. You will need to set the
+VELOCIRAPTOR_PATH environment variable to tell us where the stf-gas
+binary lives.
diff --git a/examples/SantaBarbara/SantaBarbara-128/getIC.sh b/examples/SantaBarbara/SantaBarbara-128/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..46500552980564e186d9c38ecfebd8e3f258f2b7
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-128/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/SantaBarbara/SantaBarbara_128.hdf5
diff --git a/examples/SantaBarbara/SantaBarbara-128/run.sh b/examples/SantaBarbara/SantaBarbara-128/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..72c219acb201b3a3541b6a08d799b21ba4638009
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-128/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Run SWIFT
+../../swift --cosmology --hydro --self-gravity --threads=28 santa_barbara.yml
+
diff --git a/examples/SantaBarbara/SantaBarbara-128/santa_barbara.yml b/examples/SantaBarbara/SantaBarbara-128/santa_barbara.yml
new file mode 100644
index 0000000000000000000000000000000000000000..35a6e2762f91707ed43bd5f0107c3dd8a53e12e4
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-128/santa_barbara.yml
@@ -0,0 +1,61 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # 1 km/s 
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.5        
+  a_begin:        0.047619048        # z_ini = 20
+  a_end:          1.0                # z_end = 0
+  Omega_m:        1.0        
+  Omega_lambda:   0.0        
+  Omega_b:        0.1        
+  
+# Parameters governing the time integration
+TimeIntegration:
+  dt_max:     0.01
+  dt_min:     1e-10
+
+Scheduler:
+  max_top_level_cells: 16
+  cell_split_size:     100
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            santabarbara_low
+  scale_factor_first:  0.05
+  delta_time:          1.02
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1.02
+  scale_factor_first:   0.05
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025  
+  theta:                  0.5
+  comoving_softening:     0.02    # 20 kpc = 1/25 mean inter-particle separation
+  max_physical_softening: 0.00526 # 20 ckpc = 5.26 pkpc at z=2.8 (EAGLE-like evolution of softening).
+  mesh_side_length:       64
+
+# Parameters of the hydro scheme
+SPH:
+  resolution_eta:      1.2348   # "48 Ngb" with the cubic spline kernel
+  CFL_condition:       0.1
+  h_min_ratio:         0.1
+  initial_temperature: 1200.    # (1 + z_ini)^2 * 2.72K
+  minimal_temperature: 100.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./SantaBarbara_128.hdf5
+  periodic:   1
+  cleanup_h_factors: 1              # ICs were generated for Gadget, we need to get rid of h-factors
+  cleanup_velocity_factors: 1       # ICs were generated for Gadget, we need to get rid of sqrt(a) factors in the velocity
+  generate_gas_in_ics: 1            # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths: 1      # Since we generate gas, make use of the (expensive) cleaning-up procedure.
\ No newline at end of file
diff --git a/examples/SantaBarbara/SantaBarbara-256/README b/examples/SantaBarbara/SantaBarbara-256/README
new file mode 100644
index 0000000000000000000000000000000000000000..e5bba3752215c438c01ff35931d22901c3a9d0d3
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/README
@@ -0,0 +1,21 @@
+Initial conditions for the Santa-Barbara cluster comparison project.
+These have been regenerated from the original Frenk et al. 1999 paper.
+
+The cosmology is Omega_m = 1, Omega_b = 0.1, h = 0.5 and sigma_8 = 0.9.
+
+The ICs are 256^3 particles in a 64^3 Mpc^3 volume. This is about 10x
+higher resolution than in the original paper. The ICs have been
+created for Gadget and the positions and box size are hence expressed
+in h-full units (e.g. box size of 32 / h Mpc). Similarly, the peculiar
+velocities contain an extra sqrt(a) factor. 
+
+We will use SWIFT to cancel the h- and a-factors from the ICs. Gas
+particles will be generated at startup.
+
+MD5 check-sum of the ICs: 
+ba9ab4f00a70d39fa601a4a59984b343  SantaBarbara.hdf5
+
+You can use the script run_velociraptor.sh to also run a basic 3D FoF
+with VELOCIraptor on your output data. You will need to set the
+VELOCIRAPTOR_PATH environment variable to tell us where the stf-gas
+binary lives.
diff --git a/examples/SantaBarbara/SantaBarbara-256/getIC.sh b/examples/SantaBarbara/SantaBarbara-256/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a3073631ceedea47c8ac218a5e62529efee6fc56
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/getIC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/SantaBarbara.hdf5
diff --git a/examples/SantaBarbara/SantaBarbara-256/makeImage.py b/examples/SantaBarbara/SantaBarbara-256/makeImage.py
new file mode 100644
index 0000000000000000000000000000000000000000..db6416010447952b3edd6b235237d045b16bdefd
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/makeImage.py
@@ -0,0 +1,268 @@
+"""
+Makes an image of the Santa Barbara cluster.
+
+Requires py-sphviewer.
+
+Invoke as follows:
+
+python3 makeImage.py <name of hdf5 file> \
+                     <number of particle type (i.e. 0 or 1)> \
+                     <colour map to use (default viridis)> \
+                     <text color (default white)> \
+                     <image resolution (default 2048x2048)>
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import h5py
+import matplotlib
+
+from sphviewer.tools import QuickView
+from matplotlib.patches import Rectangle
+
+from typing import Tuple
+from collections import namedtuple
+
+
+# Set up our simulation data collection to keep stuff together
+SimulationData = namedtuple(
+    "SimulationData",
+    ["coordinates", "masses", "sph_name", "dark_matter_mass", "swift_name", "boxsize"],
+)
+
+
+def latex_float(f):
+    """
+    Taken from:
+    https://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python.
+    
+    Formats a float to LaTeX style.
+    """
+
+    float_str = "{0:.2g}".format(f)
+    if "e" in float_str:
+        base, exponent = float_str.split("e")
+        return r"{0} \times 10^{{{1}}}".format(base, int(exponent))
+    else:
+        return float_str
+
+
+def read_data_from_file(filename: str, part_type=0) -> SimulationData:
+    """
+    Reads the relevant data from the HDF5 file.
+    """
+    part_type_name = f"PartType{part_type}"
+
+    with h5py.File(filename, "r") as file:
+        coordinates, boxsize = move_box(file[f"{part_type_name}/Coordinates"][...].T)
+        masses = file[f"{part_type_name}/Masses"][...]
+
+        sph_name = file["HydroScheme"].attrs["Scheme"].decode("utf-8")
+        unit_mass = (
+            float(file["Units"].attrs["Unit mass in cgs (U_M)"]) / 2e33
+        )  # in M_sun
+
+        dark_matter_mass = float(file["PartType1/Masses"][0]) * unit_mass
+
+        code_revision = file["Code"].attrs["Git Revision"].decode("utf-8")
+        swift_name = f"SWIFT {code_revision}"
+
+        data = SimulationData(
+            coordinates=coordinates,
+            masses=masses,
+            sph_name=sph_name,
+            dark_matter_mass=dark_matter_mass,
+            swift_name=swift_name,
+            boxsize=boxsize,
+        )
+
+    return data
+
+
+def move_box(coordinates: np.ndarray) -> np.ndarray:
+    """
+    Takes the coordinates and moves them in the x-y plane. This moves them 20
+    code units to the left/right to ensure that the zoomed-out version of the
+    cluster image is nicely shown
+    """
+
+    boxsize = np.max(coordinates[0])
+    coordinates[0] -= 20
+    coordinates[1] -= 20
+    coordinates[0] %= boxsize
+    coordinates[1] %= boxsize
+
+    return coordinates, boxsize
+
+
+def generate_views(data: SimulationData, res=2048) -> Tuple[np.ndarray]:
+    """
+    Generates the views on the data from py-sphviewer.
+
+    Returns the overall image for the whole box and then a zoomed region.
+    """
+
+    qv_all = QuickView(
+        data.coordinates,
+        data.masses,
+        r="infinity",
+        plot=False,
+        xsize=res,
+        ysize=res,
+        logscale=False,
+        p=0,
+        np=48,
+    )
+    zoomed_res = (res * 6) // 10
+    mask = np.logical_and(
+        np.logical_and(
+            data.coordinates[0] > (data.boxsize/2-4-20),
+            data.coordinates[0] < (data.boxsize/2+6-20)
+        ),
+        np.logical_and(
+            data.coordinates[1] > (data.boxsize/2-3.5-20),
+            data.coordinates[1] < (data.boxsize/2+6.5-20)
+        )
+    )
+    qv_zoomed = QuickView(
+        data.coordinates.T[mask].T,
+        data.masses[mask],
+        r="infinity",
+        plot=False,
+        xsize=zoomed_res,
+        ysize=zoomed_res,
+        logscale=False,
+        np=48,
+    )
+
+    return qv_all.get_image(), qv_zoomed.get_image()
+
+
+def create_plot(data: SimulationData, res=2048, cmap="viridis", text_color="white"):
+    """
+    Creates a figure and axes object and returns them for you to do with what you wish.
+    """
+
+    img_all, img_zoomed = generate_views(data, res)
+
+    fig, ax = plt.subplots(figsize=(8, 8))
+
+    # Set up in "image" mode
+    ax.axis("off")
+    fig.subplots_adjust(0, 0, 1, 1)
+
+    ax.imshow(
+        np.log10(img_all + np.min(img_all[img_all != 0])),
+        origin="lower",
+        extent=[-1, 1, -1, 1],
+        cmap=cmap,
+    )
+
+    lower_left = [(-24 / (0.5 * data.boxsize)), (-23.5 / (0.5 * data.boxsize))]
+    zoom_rect = Rectangle(
+        lower_left,
+        10 / (0.5 * data.boxsize),
+        10 / (0.5 * data.boxsize),
+        linewidth=2,
+        edgecolor=text_color,
+        facecolor="none",
+    )
+    ax.add_patch(zoom_rect)
+
+    # Remove ticks as we want "image mode"
+    ax2 = fig.add_axes([0.35, 0.35, 0.6, 0.6], frame_on=True, xticks=[], yticks=[])
+
+    ax2.imshow(
+        np.log10(img_zoomed + np.min(img_zoomed[img_zoomed != 0])),
+        origin="lower",
+        extent=[-1, 1, -1, 1],
+        cmap=cmap,
+    )
+
+    # This ugly hack sets the box around the subfigure to be white
+    for child in ax2.get_children():
+        if isinstance(child, matplotlib.spines.Spine):
+            child.set_color(text_color)
+            child.set_linewidth(2)
+
+    # Draw lines between boxes
+
+    # Bottom Right
+    ax.plot(
+        [(-14 / (0.5 * data.boxsize)), 0.9],
+        [(-23.5 / (0.5 * data.boxsize)), -0.3],
+        lw=2,
+        color=text_color,
+    )
+    # Top Left
+    ax.plot(
+        [(-24 / (0.5 * data.boxsize)), -0.3],
+        [(-13.5 / (0.5 * data.boxsize)), 0.9],
+        lw=2,
+        color=text_color,
+    )
+
+    ax.text(0.95, -0.95, data.swift_name, color=text_color, ha="right")
+    formatted_dark_matter_mass = latex_float(data.dark_matter_mass)
+    ax.text(
+        -0.95,
+        0.95,
+        rf"M$_{{\rm DM}} = {formatted_dark_matter_mass}$ M$_\odot$",
+        color=text_color,
+        va="top",
+    )
+    ax.text(
+        -0.95,
+        -0.95,
+        data.sph_name + "\n" + r"Santa Barbara Cluster (re-ran from Frenk+ 1999)",
+        color=text_color,
+    )
+
+    return fig, ax
+
+
+if __name__ == "__main__":
+    import sys
+
+    try:
+        filename = sys.argv[1]
+    except IndexError:
+        filename = "santabarbara_0153.hdf5"
+
+    try:
+        part_type = int(sys.argv[2])
+    except IndexError:
+        part_type = 0
+
+    try:
+        cmap = sys.argv[3]
+    except IndexError:
+        cmap = "viridis"
+
+    try:
+        text_color = sys.argv[4]
+    except IndexError:
+        text_color = "white"
+
+    try:
+        res = int(sys.argv[5])
+    except IndexError:
+        res = 2048
+
+    # Read in the data from file
+
+    try:
+        data = read_data_from_file(filename, part_type)
+    except KeyError:  # h5py raises KeyError when the PartType0 group is absent
+        # Must be a dark matter only run
+        part_type = 1
+        data = read_data_from_file(filename, part_type)
+
+    # Make the plot
+
+    fig, ax = create_plot(data, res, cmap, text_color)
+
+    fig.savefig(
+        f"SantaBarbara_{data.sph_name[:8]}_{cmap}_PartType{part_type}_res{res}.png",
+        dpi=res // 8,
+    )
diff --git a/examples/SantaBarbara/SantaBarbara-256/make_plots.sh b/examples/SantaBarbara/SantaBarbara-256/make_plots.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f0ed6b3ae13acf03cb6abf8c00203462800da681
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/make_plots.sh
@@ -0,0 +1,4 @@
+python3 makeImage.py santabarbara_0153.hdf5 0 twilight white
+python3 plotSolution.py 153 halo
+python3 plotTempEvolution.py
+python3 rhoTHaloComparison.py
diff --git a/examples/SantaBarbara/SantaBarbara-256/plotSmoothingLength.py b/examples/SantaBarbara/SantaBarbara-256/plotSmoothingLength.py
new file mode 100644
index 0000000000000000000000000000000000000000..634cc2758d911eedbf4d2889e48011144da3d3ee
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/plotSmoothingLength.py
@@ -0,0 +1,162 @@
+"""
+Plots the smoothing length (compared to the softening).
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import h5py
+
+from collections import namedtuple
+
+SnapshotData = namedtuple(
+    "SnapshotData",
+    [
+        "smoothing_lengths",
+        "particle_ids",
+        "softening",
+        "internal_length",
+        "snapshot_length",
+    ],
+)
+
+HaloCatalogueData = namedtuple(
+    "HaloCatalogueData", ["largest_halo", "particle_ids_in_largest_halo"]
+)
+
+
+def load_data(filename: str) -> SnapshotData:
+    """
+    Loads the data that we need, i.e. the smoothing lengths and the
+    softening length, from the snapshot.
+    """
+
+    with h5py.File(filename, "r") as handle:
+        data = SnapshotData(
+            smoothing_lengths=handle["PartType0/SmoothingLength"][...],
+            particle_ids=handle["PartType0/ParticleIDs"][...],
+            softening=handle["GravityScheme"].attrs[
+                "Comoving softening length [internal units]"
+            ][0],
+            internal_length=handle["InternalCodeUnits"].attrs[
+                "Unit length in cgs (U_L)"
+            ][0],
+            snapshot_length=handle["Units"].attrs["Unit length in cgs (U_L)"][0],
+        )
+
+    return data
+
+
+def load_halo_data(halo_filename: str) -> HaloCatalogueData:
+    """
+    Loads the halo data and finds the particle IDs that belong to
+    the largest halo. The halo filename should be given without
+    any extension as we need a couple of files to complete this.
+    """
+
+    catalogue_filename = f"{halo_filename}.properties"
+    groups_filename = f"{halo_filename}.catalog_groups"
+    particles_filename = f"{halo_filename}.catalog_particles"
+
+    with h5py.File(catalogue_filename, "r") as handle:
+        largest_halo = np.where(
+            handle["Mass_200crit"][...] == handle["Mass_200crit"][...].max()
+        )[0][0]
+
+    with h5py.File(groups_filename, "r") as handle:
+        offset_begin = handle["Offset"][largest_halo]
+        offset_end = handle["Offset"][largest_halo + 1]
+
+    with h5py.File(particles_filename, "r") as handle:
+        particle_ids = handle["Particle_IDs"][offset_begin:offset_end]
+
+    return HaloCatalogueData(
+        largest_halo=largest_halo, particle_ids_in_largest_halo=particle_ids
+    )
+
+
+def make_plot(
+    snapshot_filename: str,
+    halo_filename: str,
+    output_filename="smoothing_length_variation.png",
+) -> None:
+    """
+    Makes the plot and saves it in output_filename.
+
+    The halo filename should be provided without extension.
+    """
+
+    data = load_data(snapshot_filename)  # use the parameter, not the module global
+    halo_data = load_halo_data(halo_filename)
+
+    smoothing_lengths_in_halo = data.smoothing_lengths[
+        np.in1d(data.particle_ids, halo_data.particle_ids_in_largest_halo)
+    ]
+
+    softening = data.softening * (data.snapshot_length / data.internal_length)
+
+    fig, ax = plt.subplots(1)
+
+    ax.semilogy()
+
+    ax.hist(data.smoothing_lengths, bins="auto", label="All particles")
+    ax.hist(
+        smoothing_lengths_in_halo,
+        bins="auto",
+        label=f"Particles in largest halo (ID={halo_data.largest_halo})",
+    )
+    ax.axvline(x=softening, label="Softening", ls="--", color="grey")
+
+    ax.legend()
+
+    ax.set_xlabel("Smoothing length")
+    ax.set_ylabel("Number of particles")
+
+    ax.set_xlim(0, ax.get_xlim()[1])
+
+    fig.tight_layout()
+
+    fig.savefig(output_filename, dpi=300)
+
+    return
+
+
+if __name__ == "__main__":
+    import argparse as ap
+
+    PARSER = ap.ArgumentParser(
+        description="""
+            Makes a plot of the smoothing lengths in the box, compared
+            to the gravitational softening. Also splits out the particles
+            that are contained in the largest halo according to the
+            velociraptor outputs.
+            """
+    )
+
+    PARSER.add_argument(
+        "-s",
+        "--snapshot",
+        help="""
+            Filename and path for the snapshot (without the .hdf5),
+            Default: ./santabarbara_0153
+            """,
+        required=False,
+        default="./santabarbara_0153",
+    )
+
+    PARSER.add_argument(
+        "-v",
+        "--velociraptor",
+        help="""
+            The filename and path of the velociraptor files, excluding
+            the descriptors (i.e. without .catalog_particles).
+            Default: ./halo/santabarbara
+            """,
+        required=False,
+        default="./halo/santabarbara",
+    )
+
+    ARGS = vars(PARSER.parse_args())
+
+    filename = f"{ARGS['snapshot']}.hdf5"
+
+    make_plot(filename, ARGS["velociraptor"])
diff --git a/examples/SantaBarbara/SantaBarbara-256/plotSolution.py b/examples/SantaBarbara/SantaBarbara-256/plotSolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..131fae8f44dc3d0092e26e87a80a7862094dd4ba
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/plotSolution.py
@@ -0,0 +1,396 @@
+"""
+Plots the "solution" (i.e. some profiles) for the Santa Barbara cluster.
+
+Invoke as follows:
+
+python3 plotSolution.py <snapshot number> <catalogue directory> <number of bins (optional)>
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+import h5py
+
+from collections import namedtuple
+from typing import Tuple
+
+try:
+    import makeImage
+
+    create_images = True
+except:
+    create_images = False
+
+# Simulation data
+SimulationParticleData = namedtuple(
+    "SimulationData", ["gas", "dark_matter", "metadata"]
+)
+ParticleData = namedtuple(
+    "ParticleData", ["coordinates", "radii", "masses", "densities", "energies"]
+)
+MetaData = namedtuple("MetaData", ["header", "code", "hydroscheme"])
+HaloData = namedtuple("HaloData", ["c", "Rvir", "Mvir", "center"])
+
+
+def get_energies(handle: h5py.File):
+    """
+    Gets the energies with the correct units.
+    """
+    u = handle["PartType0/InternalEnergy"][:]
+    unit_length_in_cgs = handle["/Units"].attrs["Unit length in cgs (U_L)"]
+    unit_mass_in_cgs = handle["/Units"].attrs["Unit mass in cgs (U_M)"]
+    unit_time_in_cgs = handle["/Units"].attrs["Unit time in cgs (U_t)"]
+    gas_gamma = handle["/HydroScheme"].attrs["Adiabatic index"][0]
+    a = handle["/Cosmology"].attrs["Scale-factor"][0]
+
+    unit_length_in_si = 0.01 * unit_length_in_cgs
+    unit_mass_in_si = 0.001 * unit_mass_in_cgs
+    unit_time_in_si = unit_time_in_cgs
+
+    u *= unit_length_in_si ** 2 / unit_time_in_si ** 2
+    u /= a ** (3 * (gas_gamma - 1.))
+
+    return u
+
+
+def load_data(filename: str, center: np.array) -> SimulationParticleData:
+    """
+    Loads the relevant data for making the profiles, as well as some metadata
+    for the plot.
+
+    Center is the center of the SB cluster and is used to calculate the radial
+    distances to the particles.
+    """
+
+    with h5py.File(filename, "r") as file:
+        gas_handle = file["PartType0"]
+        dm_handle = file["PartType1"]
+
+        gas_data = ParticleData(
+            coordinates=gas_handle["Coordinates"][...],
+            radii=get_radial_distances(gas_handle["Coordinates"][...], center),
+            masses=gas_handle["Masses"][...],
+            energies=get_energies(file),
+            densities=gas_handle["Density"][...],
+        )
+
+        dm_data = ParticleData(
+            coordinates=dm_handle["Coordinates"][...],
+            radii=get_radial_distances(dm_handle["Coordinates"][...], center),
+            masses=dm_handle["Masses"][...],
+            energies=None,
+            densities=None,
+        )
+
+        metadata = MetaData(
+            header=dict(file["Header"].attrs),
+            code=dict(file["Code"].attrs),
+            hydroscheme=dict(file["HydroScheme"].attrs),
+        )
+
+        simulation_data = SimulationParticleData(
+            gas=gas_data, dark_matter=dm_data, metadata=metadata
+        )
+
+    return simulation_data
+
+
+def get_halo_data(catalogue_filename: str) -> HaloData:
+    """
+    Gets the halo center of the largest halo (i.e. the SB cluster).
+
+    You will want the .properties file, probably
+
+    halo/santabarbara.properties
+
+    that is given by VELOCIraptor.
+    """
+
+
+    with h5py.File(catalogue_filename, "r") as file:
+        largest_halo = np.where(
+            file["Mass_200crit"][...] == file["Mass_200crit"][...].max()
+        )
+
+        x = float(np.take(file["Xc"], largest_halo))
+        y = float(np.take(file["Yc"], largest_halo))
+        z = float(np.take(file["Zc"], largest_halo))
+        Mvir = float(np.take(file["Mass_200crit"], largest_halo))
+        Rvir = float(np.take(file["R_200crit"], largest_halo))
+        c = float(np.take(file["cNFW"], largest_halo))
+
+    return HaloData(c=c, Rvir=Rvir, Mvir=Mvir, center=np.array([x, y, z]))
+
+
+def get_radial_distances(coordinates: np.ndarray, center: np.array) -> np.array:
+    """
+    Gets the radial distances for all particles.
+    """
+    dx = coordinates - center
+
+    return np.sqrt(np.sum(dx * dx, axis=1))
+
+
+def get_radial_density_profile(radii, masses, bins: int) -> Tuple[np.ndarray]:
+    """
+    Gets the radial gas density profile, after generating similar bins to those
+    used in similar works.
+    """
+
+    bins = np.logspace(-2, 1, bins)
+
+    histogram, bin_edges = np.histogram(a=radii, weights=masses, bins=bins)
+
+    volumes = np.array(
+        [
+            (4. * np.pi / 3.) * (r_outer ** 3 - r_inner ** 3)
+            for r_outer, r_inner in zip(bin_edges[1:], bin_edges[:-1])
+        ]
+    )
+
+    return histogram / volumes, bin_edges  # densities
+
+
+def mu(T, H_frac, T_trans):
+    """
+    Get the molecular weight as a function of temperature.
+    """
+    if T > T_trans:
+        return 4. / (8. - 5. * (1. - H_frac))
+    else:
+        return 4. / (1. + 3. * H_frac)
+
+
+def T(u, metadata: MetaData):
+    """
+    Temperature of primordial gas.
+    """
+
+    gas_gamma = metadata.hydroscheme["Adiabatic index"][0]
+    H_frac = metadata.hydroscheme["Hydrogen mass fraction"][0]
+    T_trans = metadata.hydroscheme["Hydrogen ionization transition temperature"][0]
+
+    k_in_J_K = 1.38064852e-23
+    mH_in_kg = 1.6737236e-27
+
+    T_over_mu = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = T_over_mu > (T_trans + 1) / mu(T_trans + 1, H_frac, T_trans)
+    if np.sum(mask_ionized) > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans * 10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = T_over_mu < (T_trans - 1) / mu((T_trans - 1), H_frac, T_trans)
+    if np.sum(mask_neutral) > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+
+    return ret
+
+
+def get_radial_temperature_profile(
+    data: SimulationParticleData, bins: int
+) -> np.ndarray:
+    """
+    Gets the radial gas density profile, after generating similar bins to those
+    used in similar works.
+    """
+
+    temperatures = T(data.gas.energies, data.metadata)
+    radii = data.gas.radii
+
+    bins = np.logspace(-2, 1, bins)
+
+    histogram, _ = np.histogram(a=radii, weights=temperatures, bins=bins)
+
+    counts, _ = np.histogram(a=radii, weights=np.ones_like(radii), bins=bins)
+
+    return histogram / counts  # need to get mean value in bin
+
+
+def get_radial_entropy_profile(data: SimulationParticleData, bins: int) -> np.ndarray:
+    """
+    Gets the radial gas density profile, after generating similar bins to those
+    used in similar works.
+    """
+
+    gas_gamma = data.metadata.hydroscheme["Adiabatic index"][0]
+    gamma_minus_one = gas_gamma - 1.0
+
+    entropies = (
+        data.gas.energies * (gamma_minus_one) / data.gas.densities ** gamma_minus_one
+    )
+    print("Warning: Current entropy profile assumes all gas is ionised")
+    radii = data.gas.radii
+
+    bins = np.logspace(-2, 1, bins)
+
+    histogram, _ = np.histogram(a=radii, weights=entropies, bins=bins)
+
+    counts, _ = np.histogram(a=radii, weights=np.ones_like(radii), bins=bins)
+
+    return histogram / counts  # need to get mean value in bin
+
+
+def nfw(R, halo_data: HaloData):
+    """
+    NFW profile at radius R.
+    """
+
+    R_s = halo_data.Rvir / halo_data.c
+    rho_0 = (4 * np.pi * R_s ** 3) / (halo_data.Mvir)
+    rho_0 *= np.log(1 + halo_data.c) - halo_data.c / (halo_data.c + 1)
+    rho_0 = 1.0 / rho_0
+
+    ratio = R / R_s
+
+    return rho_0 / (ratio * (1 + ratio) ** 2)
+
+
+def create_plot(
+    data: SimulationParticleData,
+    halo_data: HaloData,
+    bins: int,
+    create_images: bool,
+    image_data: np.ndarray,
+):
+    """
+    Creates the figure and axes objects and plots the data on them.
+    """
+
+    fig, axes = plt.subplots(2, 3, figsize=(12, 8))
+
+    gas_density, bin_edges = get_radial_density_profile(
+        data.gas.radii, data.gas.masses, bins=bins
+    )
+    dm_density, _ = get_radial_density_profile(
+        data.dark_matter.radii, data.dark_matter.masses, bins=bins
+    )
+    temperature = get_radial_temperature_profile(data, bins=bins)
+    entropy = get_radial_entropy_profile(data, bins=bins)
+
+    bin_centers = [0.5 * (x + y) for x, y in zip(bin_edges[:-1], bin_edges[1:])]
+    nfw_R = np.logspace(-2, 1, bins * 100)
+    nfw_rho = nfw(nfw_R, halo_data)
+
+    axes[0][0].loglog()
+    axes[0][0].plot(nfw_R, 0.1 * nfw_rho, ls="dashed", color="grey")
+    axes[0][0].scatter(bin_centers, gas_density)
+    axes[0][0].set_ylabel(r"$\rho_{\rm gas} (R)$ [$10^{10}$ M$_\odot$ Mpc$^{-3}$]")
+    axes[0][0].set_xlabel(r"R [Mpc]")
+    axes[0][0].set_xlim(0.01, 10)
+
+    axes[0][1].semilogx()
+    axes[0][1].scatter(bin_centers, np.log(entropy))
+    axes[0][1].set_ylabel(
+        r"Entropy $\log(A$ [K ($10^{10}$ M$_\odot$)$^{2/3}$ Mpc$^{-2}$])"
+    )
+    axes[0][1].set_xlabel(r"R [Mpc]")
+    axes[0][1].set_xlim(0.01, 10)
+
+    if create_images:
+        axes[0][2].imshow(np.log10(image_data))
+
+    axes[0][2].set_xticks([])
+    axes[0][2].set_yticks([])
+
+    axes[1][0].loglog()
+    axes[1][0].scatter(bin_centers, temperature)
+    axes[1][0].set_ylabel(r"$T_{\rm gas} (R)$ [K]")
+    axes[1][0].set_xlabel(r"R [Mpc]")
+    axes[1][0].set_xlim(0.01, 10)
+
+    axes[1][1].loglog()
+    axes[1][1].scatter(bin_centers, dm_density)
+    axes[1][1].plot(nfw_R, 0.9 * nfw_rho, ls="dashed", color="grey")
+    axes[1][1].set_ylabel(r"$\rho_{\rm DM} (R)$ [$10^{10}$ M$_\odot$ Mpc$^{-3}$]")
+    axes[1][1].set_xlabel(r"R [Mpc]")
+    axes[1][1].set_xlim(0.01, 10)
+    axes[1][1].text(
+        0.02,
+        5,
+        "$c_{{vir}} = {:2.2f}$\n$R_{{vir}} = {:2.2f}$ Mpc\n$M_{{vir}} = {:2.2f}$ $10^{{10}}$ M$_\odot$".format(
+            halo_data.c, halo_data.Rvir, halo_data.Mvir
+        ),
+        va="bottom",
+        ha="left",
+    )
+
+    axes[1][2].text(
+        -0.49,
+        0.7,
+        "Santa Barbara with $\\gamma={:2.2f}$ in 3D".format(
+            data.metadata.hydroscheme["Adiabatic index"][0]
+        ),
+    )
+
+    scheme_list = data.metadata.hydroscheme["Scheme"].decode("utf-8").split(" ")
+    i = 4
+    while i < len(scheme_list):
+        scheme_list.insert(i, "\n")
+        i += 4 + 1
+    wrapped_scheme = " ".join(scheme_list)
+    wrapped_scheme.replace("\n ", "\n")
+
+    axes[1][2].text(-0.49, 0.8, wrapped_scheme)
+
+    axes[1][2].plot([-0.49, 0.1], [0.62, 0.62], "k-", lw=1)
+
+    axes[1][2].text(
+        -0.49, 0.5, f"SWIFT {data.metadata.code['Git Revision'].decode('utf-8')}"
+    )
+
+    axes[1][2].text(
+        -0.49,
+        0.3,
+        data.metadata.hydroscheme["Kernel function"].decode("utf-8"),
+        fontsize=10,
+    )
+    axes[1][2].text(
+        -0.49,
+        0.2,
+        "{:2.3f} neighbours ($\\eta={:3.3f}$)".format(
+            data.metadata.hydroscheme["Kernel target N_ngb"][0],
+            data.metadata.hydroscheme["Kernel eta"][0],
+        ),
+    )
+    axes[1][2].set_xlim(-0.5, 0.5)
+    axes[1][2].set_ylim(0, 1)
+    axes[1][2].axis("off")
+
+    fig.tight_layout()
+
+    return fig, axes
+
+
+if __name__ == "__main__":
+    import sys
+
+    filename = "santabarbara_{:04d}.hdf5".format(int(sys.argv[1]))
+    catalogue_filename = f"{sys.argv[2]}/santabarbara.properties"
+
+    try:
+        bins = int(sys.argv[3])
+    except:
+        bins = 25
+
+    halo_data = get_halo_data(catalogue_filename)
+    simulation_data = load_data(filename, halo_data.center)
+
+    if create_images:
+        data = makeImage.read_data_from_file(filename, part_type=0)
+        _, image_data = makeImage.generate_views(data)
+        del data
+    else:
+        image_data = None
+
+    fig, _ = create_plot(
+        data=simulation_data,
+        halo_data=halo_data,
+        bins=bins,
+        create_images=create_images,
+        image_data=image_data,
+    )
+
+    fig.savefig("santabarbara.png", dpi=300)
diff --git a/examples/SantaBarbara/SantaBarbara-256/plotTempEvolution.py b/examples/SantaBarbara/SantaBarbara-256/plotTempEvolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..90de6cb712744359dbdfbf07cc4ed81546ea38bf
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/plotTempEvolution.py
@@ -0,0 +1,183 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+# Number of snapshots generated
+n_snapshots = 153
+snapname = "santabarbara"
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import os.path
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 9,
+'legend.fontsize': 9,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (3.15,3.15),
+'figure.subplot.left'    : 0.14,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.12,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 2.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+# Read the simulation data
+sim = h5py.File("%s_0000.hdf5" % snapname, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs["Hydrogen ionization transition temperature"][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4. / (8. - 5. * (1. - H_frac))
+    else:
+        return 4. / (1. + 3. * H_frac)
+    
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = (T_over_mu > (T_trans+1) / mu(T_trans+1, H_frac, T_trans))
+    if np.sum(mask_ionized)  > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans*10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = (T_over_mu < (T_trans-1) / mu((T_trans-1), H_frac, T_trans))
+    if np.sum(mask_neutral)  > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+        
+    return ret
+
+
+z = np.zeros(n_snapshots)
+a = np.zeros(n_snapshots)
+T_mean = np.zeros(n_snapshots)
+T_std = np.zeros(n_snapshots)
+T_log_mean = np.zeros(n_snapshots)
+T_log_std = np.zeros(n_snapshots)
+T_median = np.zeros(n_snapshots)
+T_min = np.zeros(n_snapshots)
+T_max = np.zeros(n_snapshots)
+
+# Loop over all the snapshots
+for i in range(n_snapshots):
+    sim = h5py.File("%s_%04d.hdf5"% (snapname, i), "r")
+
+    z[i] = sim["/Cosmology"].attrs["Redshift"][0]
+    a[i] = sim["/Cosmology"].attrs["Scale-factor"][0]
+
+    u = sim["/PartType0/InternalEnergy"][:]
+
+    # Compute the temperature
+    u *= (unit_length_in_si**2 / unit_time_in_si**2)
+    u /= a[i]**(3 * (gas_gamma - 1.))
+    Temp = T(u)
+
+    # Gather statistics
+    T_median[i] = np.median(Temp)
+    T_mean[i] = Temp.mean()
+    T_std[i] = Temp.std()
+    T_log_mean[i] = np.log10(Temp).mean()
+    T_log_std[i] = np.log10(Temp).std()
+    T_min[i] = Temp.min()
+    T_max[i] = Temp.max()
+
+# CMB evolution
+a_evol = np.logspace(-3, 0, 60)
+T_cmb = (1. / a_evol)**2 * 2.72
+
+# Plot the interesting quantities
+figure()
+subplot(111, xscale="log", yscale="log")
+
+fill_between(a, T_mean-T_std, T_mean+T_std, color='C0', alpha=0.1)
+plot(a, T_max, ls='-.', color='C0', lw=1., label="${\\rm max}~T$")
+plot(a, T_min, ls=':', color='C0', lw=1., label="${\\rm min}~T$")
+plot(a, T_mean, color='C0', label="${\\rm mean}~T$", lw=1.5)
+fill_between(a, 10**(T_log_mean-T_log_std), 10**(T_log_mean+T_log_std), color='C1', alpha=0.1)
+plot(a, 10**T_log_mean, color='C1', label="${\\rm mean}~{\\rm log} T$", lw=1.5)
+plot(a, T_median, color='C2', label="${\\rm median}~T$", lw=1.5)
+
+legend(loc="upper left", frameon=False, handlelength=1.5)
+
+# Expected lines
+plot([1e-10, 1e10], [H_transition_temp, H_transition_temp], 'k--', lw=0.5, alpha=0.7)
+text(2.5e-2, H_transition_temp*1.07, "$T_{\\rm HII\\rightarrow HI}$", va="bottom", alpha=0.7, fontsize=8)
+plot([1e-10, 1e10], [T_minimal, T_minimal], 'k--', lw=0.5, alpha=0.7)
+text(1e-2, T_minimal*0.8, "$T_{\\rm min}$", va="top", alpha=0.7, fontsize=8)
+plot(a_evol, T_cmb, 'k--', lw=0.5, alpha=0.7)
+text(a_evol[20], T_cmb[20]*0.55, "$(1+z)^2\\times T_{\\rm CMB,0}$", rotation=-34, alpha=0.7, fontsize=8, va="top", bbox=dict(facecolor='w', edgecolor='none', pad=1.0, alpha=0.9))
+
+
+redshift_ticks = np.array([0., 1., 2., 5., 10., 20., 50., 100.])
+redshift_labels = ["$0$", "$1$", "$2$", "$5$", "$10$", "$20$", "$50$", "$100$"]
+a_ticks = 1. / (redshift_ticks + 1.)
+
+xticks(a_ticks, redshift_labels)
+minorticks_off()
+
+xlabel("${\\rm Redshift}~z$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=0)
+xlim(9e-3, 1.1)
+ylim(20, 2.5e7)
+
+savefig("Temperature_evolution.png", dpi=200)
+
diff --git a/examples/SantaBarbara/SantaBarbara-256/rhoTHaloComparison.py b/examples/SantaBarbara/SantaBarbara-256/rhoTHaloComparison.py
new file mode 100644
index 0000000000000000000000000000000000000000..edf4e47527bd61e3f9b017b5a53510036dbcacac
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/rhoTHaloComparison.py
@@ -0,0 +1,221 @@
+"""
+This script finds the temperatures inside all of the halos and
+compares it against the virial temperature. This uses velociraptor
+and the SWIFT snapshot.
+
+Folkert Nobels (2018) nobels@strw.leidenuniv.nl
+Josh Borrow (2019) joshua.borrow@durham.ac.uk
+"""
+
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from matplotlib.colors import LogNorm
+
+mH = 1.6733e-24  # g
+kB = 1.38e-16  # erg/K
+
+
+def virial_temp(mu, M, h=0.703, a=1.0):
+    """
+    Calculates the virial temperature according to
+
+    https://arxiv.org/pdf/1105.5701.pdf
+
+    Equation 1.
+    """
+    return  4e4 * (mu / 1.2) * (M * h / 1e8) ** (2 / 3) / (10 * a)
+
+
+def calculate_group_sizes_array(offsets: np.array, total_size: int) -> np.array:
+    """
+    Calculates the group sizes array from the offsets and total size, i.e. it
+    calculates the diff between all of the offsets.
+    """
+
+    # Does not include the LAST one
+    group_sizes = [x - y for x, y in zip(offsets[1:], offsets[:-1])]
+    group_sizes += [total_size - offsets[-1]]
+    group_sizes = np.array(group_sizes, dtype=type(offsets[0]))
+
+    return group_sizes
+
+
+def create_group_array(group_sizes: np.array) -> np.array:
+    """
+    Creates an array that looks like:
+    [GroupID0, GroupID0, ..., GroupIDN, GroupIDN]
+    i.e. for each group create the correct number of group ids.
+    These are sorted alongside the particle IDs to track the placement
+    of group IDs. NOTE(review): dtype is taken from the global `offsets`.
+    """
+
+    slices = []
+    running_total_of_particles = type(offsets[0])(0)
+
+    for group in group_sizes:
+        slices.append([running_total_of_particles, group + running_total_of_particles])
+        running_total_of_particles += group
+
+    groups = np.empty(group_sizes.sum(), dtype=int)
+
+    for group_id, group in enumerate(slices):
+        groups[group[0] : group[1]] = group_id
+
+    return groups
+
+
+if __name__ == "__main__":
+    import argparse as ap
+
+    PARSER = ap.ArgumentParser(
+        description="""
+        Makes many plots comparing the virial temperature and the
+        temperature of halos. Requires the velociraptor files and
+        the SWIFT snapshot.
+        """
+    )
+
+    PARSER.add_argument(
+        "-s",
+        "--snapshot",
+        help="""
+            Filename and path for the snapshot (without the .hdf5),
+            Default: ./santabarbara_0153
+            """,
+        required=False,
+        default="./santabarbara_0153",
+    )
+
+    PARSER.add_argument(
+        "-v",
+        "--velociraptor",
+        help="""
+            The filename and path of the velociraptor files, excluding
+            the descriptors (i.e. without .catalog_particles).
+            Default: ./halo/santabarbara
+            """,
+        required=False,
+        default="./halo/santabarbara",
+    )
+
+    ARGS = vars(PARSER.parse_args())
+
+    # Grab some metadata before we begin.
+    with h5py.File("%s.hdf5" % ARGS["snapshot"], "r") as handle:
+        # Cosmology
+        a = handle["Cosmology"].attrs["Scale-factor"][0]
+        h = handle["Cosmology"].attrs["h"][0]
+
+        # Gas
+        hydro = handle["HydroScheme"].attrs
+        X = hydro["Hydrogen mass fraction"][0]
+        gamma = hydro["Adiabatic index"][0]
+        mu = 1 / (X + (1 - X) / 4)
+
+    # First we must construct a group array so we know which particles belong
+    # to which group.
+    with h5py.File("%s.catalog_groups" % ARGS["velociraptor"], "r") as handle:
+        offsets = handle["Offset"][...]
+
+    # Then, extract the particles that belong to the halos. For that, we need
+    # the particle IDs:
+    with h5py.File("%s.catalog_particles" % ARGS["velociraptor"], "r") as handle:
+        ids_in_halos = handle["Particle_IDs"][...]
+
+    number_of_groups = len(offsets)
+    group_sizes = calculate_group_sizes_array(offsets, ids_in_halos.size)
+    group_array = create_group_array(group_sizes)
+
+    # We can now load the particle data from the snapshot.
+    with h5py.File("%s.hdf5" % ARGS["snapshot"], "r") as handle:
+        gas_particles = handle["PartType0"]
+
+        particle_ids = gas_particles["ParticleIDs"][...]
+
+        # Requires numpy 1.15 or greater.
+        _, particles_in_halos_mask, group_array_mask = np.intersect1d(
+            particle_ids,
+            ids_in_halos,
+            assume_unique=True,
+            return_indices=True,
+        )
+
+        # We also need to re-index the group array to cut out DM particles
+        group_array = group_array[group_array_mask]
+        
+        # Kill the spare
+        del particle_ids
+
+        # Now we can only read the properties that we require from the snapshot!
+        temperatures = np.take(gas_particles["InternalEnergy"], particles_in_halos_mask)
+        # 1e10 converts internal energy to cgs: internal units presumably use km/s (1e5 cm/s), so u scales by (1e5)^2 = 1e10 — TODO confirm against snapshot units.
+        temperatures *= 1e10 * (gamma - 1) * mu * mH / kB
+
+        densities = np.take(gas_particles["Density"], particles_in_halos_mask)
+
+    # Just a quick check to make sure nothing's gone wrong.
+    assert len(group_array) == len(temperatures)
+
+    # Now we can loop through all the particles and find out the mean temperature and
+    # density in each halo.
+
+    particles_in_group = np.zeros(number_of_groups, dtype=int)
+    temp_in_group = np.zeros(number_of_groups, dtype=float)
+    dens_in_group = np.zeros(number_of_groups, dtype=float)
+
+    for group, T, rho in zip(group_array, temperatures, densities):
+        particles_in_group[group] += 1
+        temp_in_group[group] += T
+        dens_in_group[group] += rho
+
+    # First get a mask to ensure no runtime warnings
+    mask = particles_in_group != 0
+    
+    # Normalize
+    temp_in_group[mask] /= particles_in_group[mask]
+    dens_in_group[mask] /= particles_in_group[mask]
+
+    # Now we can load the data according to the halo finder to compare with.
+    with h5py.File("%s.properties" % ARGS["velociraptor"], "r") as handle:
+        halo_masses = handle["Mass_200crit"][...]
+
+    halo_temperatures = virial_temp(mu, halo_masses * 1e10, h=h, a=a)
+
+    # Finally, the plotting!
+
+    fig, ax = plt.subplots()
+    ax.loglog()
+
+    mask = np.logical_and.reduce([
+         halo_temperatures != 0.0,
+         temp_in_group != 0.0,
+    ])
+
+    temp_in_group = temp_in_group[mask]
+    halo_temperatures = halo_temperatures[mask]
+
+    mean_range = [temp_in_group.min(), temp_in_group.max()]
+    halo_range = [halo_temperatures.min(), halo_temperatures.max()]
+
+    bottom = min([halo_range[0], mean_range[0]])
+    top = max([halo_range[1], mean_range[1]])
+
+    plt.plot(
+        [bottom, top],
+        [bottom, top],
+        lw=2, linestyle="--", color="grey", label="1:1"
+    )
+    
+    ax.scatter(halo_temperatures, temp_in_group, s=2, edgecolor="none", label="Halos")
+
+    ax.set_ylabel("Mean Group Temperature [K]")
+    ax.set_xlabel("Halo Virial Temperature [K]")
+
+    ax.set_ylim(mean_range)
+    ax.set_xlim(halo_range)
+
+    ax.legend(frameon=False)
+
+    fig.tight_layout()
+    fig.savefig("temperature_comparison.png", dpi=300)
diff --git a/examples/SantaBarbara/SantaBarbara-256/rhoTPlot.py b/examples/SantaBarbara/SantaBarbara-256/rhoTPlot.py
new file mode 100644
index 0000000000000000000000000000000000000000..c290268eaa548e188bb652104ea9e726ea88a267
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/rhoTPlot.py
@@ -0,0 +1,258 @@
+"""
+Makes a rho-T plot. Uses the swiftsimio library.
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from swiftsimio import SWIFTDataset, SWIFTMetadata, SWIFTUnits
+
+from unyt import mh, cm, Gyr
+from tqdm import tqdm
+from matplotlib.colors import LogNorm
+from matplotlib.animation import FuncAnimation
+
+# Constants; these could be put in the parameter file but are rarely changed.
+density_bounds = [1e-8, 1e4]  # in nh/cm^3
+temperature_bounds = [1e2, 1e8]  # in K
+bins = 128
+
+# Plotting controls
+cmap = "viridis"
+
+
+def get_data(filename):
+    """
+    Grabs the data (T in Kelvin and density in mh / cm^3).
+    """
+
+    data = SWIFTDataset(filename)
+
+    data.gas.density.convert_to_units(mh / (cm ** 3))
+    data.gas.temperature.convert_to_cgs()
+
+    return data.gas.density, data.gas.temperature
+
+
+def make_hist(filename, density_bounds, temperature_bounds, bins):
+    """
+    Makes the histogram for filename with bounds as lower, higher
+    for the bins and "bins" the number of bins along each dimension.
+
+    Also returns the edges for pcolormesh to use.
+    """
+
+    density_bins = np.logspace(
+        np.log10(density_bounds[0]), np.log10(density_bounds[1]), bins
+    )
+    temperature_bins = np.logspace(
+        np.log10(temperature_bounds[0]), np.log10(temperature_bounds[1]), bins
+    )
+
+    H, density_edges, temperature_edges = np.histogram2d(
+        *get_data(filename), bins=[density_bins, temperature_bins]
+    )
+
+    return H.T, density_edges, temperature_edges
+
+
+def setup_axes():
+    """
+    Creates the figure and axis object.
+    """
+    fig, ax = plt.subplots(1, figsize=(6, 5), dpi=300)
+
+    ax.set_xlabel("Density [$n_H$ cm$^{-3}$]")
+    ax.set_ylabel("Temperature [K]")
+
+    ax.loglog()
+
+    return fig, ax
+
+
+def make_single_image(filename, density_bounds, temperature_bounds, bins):
+    """
+    Makes a single image and saves it to rhoTPlot_{filename}.png.
+    
+    Filename should be given _without_ hdf5 extension.
+    """
+
+    fig, ax = setup_axes()
+    hist, d, T = make_hist(
+        "{:s}.hdf5".format(filename), density_bounds, temperature_bounds, bins
+    )
+
+    mappable = ax.pcolormesh(d, T, hist, cmap=cmap, norm=LogNorm())
+    fig.colorbar(mappable, label="Number of particles", pad=0)
+
+    fig.tight_layout()
+
+    fig.savefig("rhoTPlot_{:s}.png".format(filename))
+
+    return
+
+
+def make_movie(args, density_bounds, temperature_bounds, bins):
+    """
+    Makes a movie and saves it to rhoTPlot_{stub}.mp4.
+    """
+
+    fig, ax = setup_axes()
+
+    def grab_metadata(n):
+        filename = "{:s}_{:04d}.hdf5".format(args["stub"], n)
+        data = SWIFTMetadata(filename)
+
+        return data
+
+    def grab_data(n):
+        filename = "{:s}_{:04d}.hdf5".format(args["stub"], n)
+
+        H, _, _ = make_hist(filename, density_bounds, temperature_bounds, bins)
+
+        # Need to ravel because pcolormesh's set_array takes a 1D array. Might
+        # as well do it here, because 1d arrays are easier to max() than 2d.
+        return H.ravel()
+
+    histograms = [
+        grab_data(n)
+        for n in tqdm(
+            range(args["initial"], args["final"] + 1), desc="Histogramming data"
+        )
+    ]
+
+    metadata = [
+        grab_metadata(n)
+        for n in tqdm(
+            range(args["initial"], args["final"] + 1), desc="Grabbing metadata"
+        )
+    ]
+
+    units = SWIFTUnits("{:s}_{:04d}.hdf5".format(args["stub"], args["initial"]))
+
+    # Need to get a reasonable norm so that we don't overshoot.
+    max_particles = max([x.max() for x in histograms])
+
+    norm = LogNorm(vmin=1, vmax=max_particles)
+
+    # First, let's make the initial frame (we need this for our d, T values that we
+    # got rid of in grab_data).
+    hist, d, T = make_hist(
+        "{:s}_{:04d}.hdf5".format(args["stub"], args["initial"]),
+        density_bounds,
+        temperature_bounds,
+        bins,
+    )
+
+    mappable = ax.pcolormesh(d, T, hist, cmap=cmap, norm=norm)
+    fig.colorbar(mappable, label="Number of particles", pad=0)
+
+    fig.tight_layout()
+
+    # Once we've rearranged the figure with tight_layout(), we can start laying
+    # down the metadata text.
+
+    def format_metadata(metadata: SWIFTMetadata):
+        t = metadata.t * units.units["Unit time in cgs (U_t)"]
+        t.convert_to_units(Gyr)
+        
+        x = "$a$: {:2.2f}\n$z$: {:2.2f}\n$t$: {:2.2f}".format(
+            metadata.a, metadata.z, t
+        )
+
+        return x
+
+    text = ax.text(
+        0.025,
+        0.975,
+        format_metadata(metadata[0]),
+        ha="left",
+        va="top",
+        transform=ax.transAxes,
+    )
+
+    ax.text(
+        0.975,
+        0.975,
+        metadata[0].code["Git Revision"].decode("utf-8"),
+        ha="right",
+        va="top",
+        transform=ax.transAxes,
+    )
+
+    def animate(data):
+        mappable.set_array(histograms[data])
+        text.set_text(format_metadata(metadata[data]))
+
+        return mappable
+
+    animation = FuncAnimation(
+        fig, animate, range(len(histograms)), fargs=[], interval=1000 / 25
+    )
+
+    animation.save("rhoTPlot_{:s}.mp4".format(args["stub"]))
+
+    return
+
+
+if __name__ == "__main__":
+    import argparse as ap
+
+    parser = ap.ArgumentParser(
+        description="""
+             Plotting script for making a rho-T plot.
+             Takes the filename handle, start, and (optionally) stop
+             snapshots. If stop is not given, png plot is produced for
+             that snapshot. If given, a movie is made.
+             """
+    )
+
+    parser.add_argument(
+        "-i",
+        "--initial",
+        help="""Initial snapshot number. Default: 0.""",
+        default=0,
+        required=False,
+        type=int,
+    )
+
+    parser.add_argument(
+        "-f",
+        "--final",
+        help="""Final snapshot number. Default: 0.""",
+        default=0,
+        required=False,
+        type=int,
+    )
+
+    parser.add_argument(
+        "-s",
+        "--stub",
+        help="""Stub for the filename (e.g. santabarbara). This is
+                the first part of the filename for the snapshots,
+                not including the final underscore. Required.""",
+        type=str,
+        required=True,
+    )
+
+    args = vars(parser.parse_args())
+
+    if args["final"] <= args["initial"]:
+        # Run in single image mode.
+        filename = "{:s}_{:04d}".format(args["stub"], args["initial"])
+
+        make_single_image(
+            filename,
+            density_bounds=density_bounds,
+            temperature_bounds=temperature_bounds,
+            bins=bins,
+        )
+
+    else:
+        # Movie mode!
+        make_movie(
+            args,
+            density_bounds=density_bounds,
+            temperature_bounds=temperature_bounds,
+            bins=bins,
+        )
diff --git a/examples/SantaBarbara/SantaBarbara-256/run.sh b/examples/SantaBarbara/SantaBarbara-256/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..72c219acb201b3a3541b6a08d799b21ba4638009
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/run.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Run SWIFT
+../../swift --cosmology --hydro --self-gravity --threads=28 santa_barbara.yml
+
diff --git a/examples/SantaBarbara/SantaBarbara-256/run_velociraptor.sh b/examples/SantaBarbara/SantaBarbara-256/run_velociraptor.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3b7ca06ec8125d05066762f20c7324f9faa42348
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/run_velociraptor.sh
@@ -0,0 +1,2 @@
+mkdir halo
+${VELOCIRAPTOR_PATH} -I 2 -i santabarbara_0153 -C velociraptor_cfg.cfg -o ./halo/santabarbara
diff --git a/examples/SantaBarbara/SantaBarbara-256/santa_barbara.yml b/examples/SantaBarbara/SantaBarbara-256/santa_barbara.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0abbc91019957952276b51db01a3a1b71d6e4fdf
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/santa_barbara.yml
@@ -0,0 +1,82 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 Msun 
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc 
+  UnitVelocity_in_cgs: 1e5           # 1 km/s 
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.5        
+  a_begin:        0.047619048        # z_ini = 20
+  a_end:          1.0                # z_end = 0
+  Omega_m:        1.0        
+  Omega_lambda:   0.0        
+  Omega_b:        0.1        
+  
+# Parameters governing the time integration
+TimeIntegration:
+  dt_max:     0.01
+  dt_min:     1e-10
+
+Scheduler:
+  max_top_level_cells: 16
+  cell_split_size:     100
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            santabarbara 
+  scale_factor_first:  0.05
+  delta_time:          1.02
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:           1.02
+  scale_factor_first:   0.05
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025  
+  theta:                  0.5
+  comoving_softening:     0.01    # 10 kpc = 1/25 mean inter-particle separation
+  max_physical_softening: 0.00263 # 10 ckpc = 2.63 pkpc at z=2.8 (EAGLE-like evolution of softening).
+  mesh_side_length:       128
+
+# Parameters of the hydro scheme
+SPH:
+  resolution_eta:      1.2348   # "48 Ngb" with the cubic spline kernel
+  h_min_ratio:         0.1
+  CFL_condition:       0.1
+  initial_temperature: 1200.    # (1 + z_ini)^2 * 2.72K
+  minimal_temperature: 100.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./SantaBarbara_256.hdf5
+  periodic:   1
+  cleanup_h_factors: 1              # ICs were generated for Gadget, we need to get rid of h-factors
+  cleanup_velocity_factors: 1       # ICs were generated for Gadget, we need to get rid of sqrt(a) factors in the velocity
+  generate_gas_in_ics: 1            # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths: 1      # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Impose primordial metallicity
+EAGLEChemistry:
+  init_abundance_metal:     0.0
+  init_abundance_Hydrogen:  0.752
+  init_abundance_Helium:    0.248
+  init_abundance_Carbon:    0.0
+  init_abundance_Nitrogen:  0.0
+  init_abundance_Oxygen:    0.0
+  init_abundance_Neon:      0.0
+  init_abundance_Magnesium: 0.0
+  init_abundance_Silicon:   0.0
+  init_abundance_Iron:      0.0
+
+EAGLECooling:
+  dir_name:                ./coolingtables/
+  H_reion_z:               11.5 
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
diff --git a/examples/SantaBarbara/SantaBarbara-256/velociraptor_cfg.cfg b/examples/SantaBarbara/SantaBarbara-256/velociraptor_cfg.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..b904d2a419ad6010e12073209bd77e8f59eef7c4
--- /dev/null
+++ b/examples/SantaBarbara/SantaBarbara-256/velociraptor_cfg.cfg
@@ -0,0 +1,147 @@
+#configuration file.
+#It is suggested that you alter this file as necessary as not all options will be desired and some conflict.
+#This file is simply meant to show options available.
+
+################################
+#input related
+################################
+#input is from a cosmological simulation so we can use parameters like box size, h, Omega_m to calculate length and density scales
+Cosmological_input=1
+
+#Type of snapshot to read. Ignored when using within SWIFT.
+HDF_name_convention=6 # ILLUSTRIS 0, GADGETX 1, EAGLE 2, GIZMO 3, SIMBA 4, MUFASA 5, SWIFTEAGLE 6
+
+#whether star particles are present in the input
+Input_includes_star_particle=0
+#bhs present
+Input_includes_bh_particle=0
+#no wind present
+Input_includes_wind_particle=0
+#no tracers present
+Input_includes_tracer_particle=0
+#no low res/extra dm particle types present
+Input_includes_extradm_particle=0
+
+
+Particle_search_type=1 #search all particles, see allvars for other types
+Baryon_searchflag=0 #if 1 search for baryons separately using phase-space search when identifying substructures, 2 allows special treatment in field FOF linking and phase-space substructure search, 0 treat the same as dark matter particles
+Search_for_substructure=0 #if 0, end search once field objects are found
+FoF_Field_search_type=5 #5 3DFOF search for field halos, 4 for 6DFOF clean up of field halos, 3 for 6DFOF with velocity scale distinct for each halo
+Unbind_flag=0 #run unbinding
+Halo_core_search=0
+Significance_level=1.0 #how significant a substructure is relative to Poisson noise. Values >= 1 are fine.
+
+################################
+# unit options, should always be provided
+################################
+
+# This is only for i/o. Specifies what units the code was running in.
+# These should be set to whatever internal units we use.
+# They have no impact on the way the code runs.
+Length_unit_to_kpc=1000. #conversion of output length units to kpc
+Velocity_to_kms=1.0      #conversion of output velocity units to km/s
+Mass_to_solarmass=1e+10  #conversion of output mass units to solar masses
+
+# units conversion from input to desired internal unit.
+# These should be set to 1 unless a conversion is expected.
+Length_unit=1.0    #default length unit
+Velocity_unit=1.0  #default velocity unit
+Mass_unit=1.0      #default mass unit
+
+# These are ignored when running within SWIFT.
+# When using standalone code, G and H must match the value used in the run. 
+Gravity=4.300927e+01   # In internal units (here 10^10 Msun, km/s, Mpc)
+Hubble_unit=100.0      # This is H0 / h in internal units. 
+
+################################
+#search related options
+################################
+
+#how to search a simulation
+# searches for separate 6dfof cores in field haloes, and then more than just flags halo as merging, assigns particles to each merging "halo". 2 is full separation, 1 is flagging, 0 is off
+#also useful for zoom simulations or simulations of individual objects, setting this flag means no field structure search is run
+Singlehalo_search=0 #if file is single halo in which one wishes to search for substructure
+#additional option for field haloes
+Keep_FOF=0 #if field 6DFOF search is done, allows keeping structures found in 3DFOF (can be interpreted as the inter halo stellar mass when only stellar search is used).
+
+#minimum size for structures
+Minimum_size=256 #minimum number of particles in a structure (here 256)
+Minimum_halo_size=-1 #if field halos have different minimum sizes, otherwise set to -1.
+
+#for field fof halo search
+Halo_linking_length_factor=2.0 #factor by which Physical_linking_length is changed when searching for field halos. Typical values are ~2 when using iterative substructure search.
+Halo_velocity_linking_length_factor=5.0 #for 6d fof halo search increase ellv from substructure search
+
+#for mean field estimates and local velocity density distribution function estimator related quantities, rarely need to change this
+Cell_fraction = 0.01 #fraction of field fof halo used to determine mean velocity distribution function. Typical values are ~0.005-0.02
+Grid_type=1 #normal entropy based grid, shouldn't have to change
+Nsearch_velocity=32 #number of velocity neighbours used to calculate local velocity distribution function. Typical values are ~32
+Nsearch_physical=256 #number of physical neighbours from which the nearest velocity neighbour set is based. Typical values are 128-512
+
+#for substructure search, rarely ever need to change this
+FoF_search_type=1 #default phase-space FOF search. Don't really need to change
+Iterative_searchflag=1 #iterative substructure search, for substructure find initial candidate substructures with smaller linking lengths then expand search region
+Outlier_threshold=2.5 #outlier threshold for a particle to be considered residing in substructure, that is how dynamically distinct a particle is. Typical values are >2
+Velocity_ratio=2.0 #ratio of speeds used in phase-space FOF
+Velocity_opening_angle=0.10 #angle between velocities. 18 degrees here, typical values are ~10-30
+Physical_linking_length=0.10 #physical linking length. IF reading periodic volumes in gadget/hdf/ramses, in units of the effective inter-particle spacing. Otherwise in user defined code units. Here set to 0.10 as iterative flag one, values of 0.1-0.3 are typical.
+Velocity_linking_length=0.20 #where scaled by structure dispersion
+
+#for iterative substructure search, rarely ever need to change this
+Iterative_threshold_factor=1.0 #change in threshold value when using iterative search. Here no increase in threshold if iterative or not
+Iterative_linking_length_factor=2.0 #increase in final linking final iterative substructure search will be sqrt(2.25)*this factor
+Iterative_Vratio_factor=1.0 #change in Vratio when using iterative search. no change in vratio
+Iterative_ThetaOp_factor=1.0 #change in velocity opening angle. no change in velocity opening angle
+
+#for checking for halo merger remnants, which are defined as large, well separated phase-space density maxima
+
+#if searching for cores, linking lengths. likely does not need to change much
+Use_adaptive_core_search=2 #calculate dispersions in configuration & vel space to determine linking lengths
+Halo_core_ellx_fac=1.0 #how linking lengths are changed when searching for local 6DFOF cores,
+Halo_core_ellv_fac=1.0 #how velocity lengths based on dispersions are changed when searching for local 6DFOF cores
+Halo_core_ncellfac=0.05 #fraction of total halo particle number setting min size of a local 6DFOF core
+Halo_core_adaptive_sigma_fac=2.0 #used when running fully adaptive core search with phase-space tensors, specifies the width of the physical linking length in configuration space dispersion (think of this as how many sigma to include). Typical values are 2
+Halo_core_num_loops=3 #allows the core search to iterate, shrinking the velocity linking length until the number of cores identified decreases or this limit is reached. Allows adaptive search with larger linking length to be robust. Typical values are 3-5
+Halo_core_loop_ellv_fac=0.75 #Factor by which velocity linking length is decreased when running loops for core search. Typical values are 0.75
+
+################################
+#Unbinding options (VELOCIraptor is able to accurately identify tidal debris so particles need not be bound to a structure)
+################################
+
+#unbinding related items
+
+Min_bound_mass_frac=0.2 #minimum bound mass fraction, not yet implemented
+#alpha factor used to determine whether particle is "bound" alaph*T+W<0. For standard subhalo catalogues use >0.9 but if interested in tidal debris 0.2-0.5
+Allowed_kinetic_potential_ratio=0.2
+#run unbinding of field structures, aka halos
+Bound_halos=0
+#simple Plummer softening length when calculating gravitational energy. If cosmological simulation with period, is fraction of interparticle spacing
+Softening_length=0.00263
+#don't keep background potential when unbinding
+Keep_background_potential=0
+
+################################
+#Calculation of properties related options
+################################
+#when calculating properties, for field objects calculate inclusive masses
+Inclusive_halo_masses=1 #calculate inclusive masses
+#ensures that output is comoving distances per little h
+Comoving_units=0
+
+################################
+#output related
+################################
+
+Write_group_array_file=0 #write a group array file
+Separate_output_files=0 #separate output into field and substructure files similar to subfind
+Binary_output=2 #binary output 1, ascii 0, and HDF 2
+
+#halo ids are adjusted by this value * 1000000000000 (or 1000000 if code compiled with the LONGINTS option turned off)
+#to ensure that halo ids are temporally unique. So if you had 100 snapshots, for snap 100 set this to 100 and 100*1000000000000 will
+#be added to the halo id as set for this snapshot, so halo 1 becomes halo 100*1000000000000+1 and halo 1 of snap 0 would just have ID=1
+Snapshot_value=1
+
+################################
+#other options
+################################
+Verbose=0 #how talkative do you want the code to be, 0 not much, 1 a lot, 2 chatterbox
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/README b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/README
new file mode 100644
index 0000000000000000000000000000000000000000..8063a5da1e68b608759d35373e6006d17bf5047e
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/README
@@ -0,0 +1,6 @@
+This parameter file can be used to run the exact same example
+with the Gadget-2 code.
+
+The Gadget code has to be compiled with at least the following options:
+ - PERIODIC
+ - HAVE_HDF5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/small_cosmo_volume_dm.param b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/small_cosmo_volume_dm.param
new file mode 100644
index 0000000000000000000000000000000000000000..4eaaab4cb124db898928c75e7a7a03bb850c5a9f
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/Gadget2/small_cosmo_volume_dm.param
@@ -0,0 +1,137 @@
+
+% System of units
+
+UnitLength_in_cm         3.08567758e24      %  1.0 Mpc
+UnitMass_in_g            1.98848e43         %  1.0e10 solar masses 
+UnitVelocity_in_cm_per_s 1e5                %  1 km/sec 
+GravityConstantInternal  4.300927e+01       %  Same value as SWIFT
+
+%  Relevant files
+InitCondFile  	   small_cosmo_volume
+OutputDir          data/
+
+EnergyFile         energy.txt
+InfoFile           info.txt
+TimingsFile        timings.txt
+CpuFile            cpu.txt
+
+RestartFile        restart
+SnapshotFileBase   box
+
+OutputListFilename dummy
+
+% CPU time -limit
+
+TimeLimitCPU      360000  % = 10 hours
+ResubmitOn        0
+ResubmitCommand   my-scriptfile  
+
+
+% Code options
+
+ICFormat                 3
+SnapFormat               3
+ComovingIntegrationOn    1
+
+TypeOfTimestepCriterion  0
+OutputListOn             0
+PeriodicBoundariesOn     1
+
+%  Characteristics of run
+
+TimeBegin             0.019607843 % z = 50.
+TimeMax	              1.          % z = 0.
+
+Omega0	              0.276
+OmegaLambda           0.724
+OmegaBaryon           0.0455
+HubbleParam           0.703
+BoxSize               100.        % Mpc / h
+
+% Output frequency
+
+TimeBetSnapshot        1.02
+TimeOfFirstSnapshot    0.02
+
+CpuTimeBetRestartFile     36000.0    ; here in seconds
+TimeBetStatistics         0.02
+
+NumFilesPerSnapshot       1
+NumFilesWrittenInParallel 1
+
+% Accuracy of time integration
+
+ErrTolIntAccuracy      0.025 
+MaxRMSDisplacementFac  0.25
+CourantFac             0.1     
+MaxSizeTimestep        0.01
+MinSizeTimestep        1e-6
+
+
+% Tree algorithm, force accuracy, domain update frequency
+
+ErrTolTheta            	     0.3
+TypeOfOpeningCriterion	     1
+ErrTolForceAcc         	     0.005
+TreeDomainUpdateFrequency    0.01
+
+%  Further parameters of SPH
+
+DesNumNgb              48
+MaxNumNgbDeviation     1.
+ArtBulkViscConst       0.8
+InitGasTemp            0.        
+MinGasTemp             0.
+
+% Memory allocation
+
+PartAllocFactor       1.6
+TreeAllocFactor       0.8
+BufferSize            30  
+
+% Softening lengths
+
+MinGasHsmlFractional 0.001
+
+SofteningGas       0
+SofteningHalo      0.0625        # 62.5 kpc / h = 1/25 of mean inter-particle separation
+SofteningDisk      0
+SofteningBulge     0           
+SofteningStars     0
+SofteningBndry     0
+
+SofteningGasMaxPhys       0
+SofteningHaloMaxPhys      0.0625   # 62.5 kpc / h = 1/25 of mean inter-particle separation
+SofteningDiskMaxPhys      0
+SofteningBulgeMaxPhys     0           
+SofteningStarsMaxPhys     0
+SofteningBndryMaxPhys     0
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/examples/SmallCosmoVolume/README b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/README
similarity index 86%
rename from examples/SmallCosmoVolume/README
rename to examples/SmallCosmoVolume/SmallCosmoVolume_DM/README
index 68c137aee30c08bb476b760c75dceaa5e1ede87e..14a289cf4a1d638c18f421f23ca8bcf0ced68d1b 100644
--- a/examples/SmallCosmoVolume/README
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/README
@@ -6,4 +6,4 @@ The ICs have been generated to run with Gadget-2 so we need to switch
 on the options to cancel the h-factors and a-factors at reading time.
 
 MD5 checksum of the ICs:
-2a9c603ffb1f6d29f3d98a3ecb9d3238  small_cosmo_volume.hdf5
+08736c3101fd738e22f5159f78e6022b  small_cosmo_volume.hdf5
diff --git a/examples/SmallCosmoVolume/getIC.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/getIC.sh
similarity index 100%
rename from examples/SmallCosmoVolume/getIC.sh
rename to examples/SmallCosmoVolume/SmallCosmoVolume_DM/getIC.sh
diff --git a/examples/SmallCosmoVolume/run.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/run.sh
similarity index 66%
rename from examples/SmallCosmoVolume/run.sh
rename to examples/SmallCosmoVolume/SmallCosmoVolume_DM/run.sh
index fe67706d7512d6f4ff1537ce008ce3a52a6ce6a6..5d166e00a630ca93ff92a42f6d26b012b132e097 100755
--- a/examples/SmallCosmoVolume/run.sh
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/run.sh
@@ -7,5 +7,6 @@ then
     ./getIC.sh
 fi
 
-../swift -c -G -t 8 small_cosmo_volume.yml 2>&1 | tee output.log
+# Run SWIFT
+../../swift --cosmology --self-gravity --threads=8 small_cosmo_volume_dm.yml 2>&1 | tee output.log
 
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ebe3a78ee0d03eb53752b1dfa8fa749931a754a9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/small_cosmo_volume_dm.yml
@@ -0,0 +1,57 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Structure finding options
+StructureFinding:
+  config_file_name:     stf_input_6dfof_dmonly_sub.cfg
+  basename:             ./stf
+  scale_factor_first:   0.02
+  delta_time:           1.02
+
+Cosmology:                      # WMAP9 cosmology
+  Omega_m:        0.276
+  Omega_lambda:   0.724
+  Omega_b:        0.0455
+  h:              0.703
+  a_begin:        0.019607843	# z_ini = 50.
+  a_end:          1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:          0.025         
+  theta:        0.3           
+  comoving_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_softening: 0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  mesh_side_length:       64
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            snap
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  small_cosmo_volume.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
diff --git a/examples/SmallCosmoVolume/stf_input_6dfof_dmonly_sub.cfg b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/stf_input_6dfof_dmonly_sub.cfg
similarity index 99%
rename from examples/SmallCosmoVolume/stf_input_6dfof_dmonly_sub.cfg
rename to examples/SmallCosmoVolume/SmallCosmoVolume_DM/stf_input_6dfof_dmonly_sub.cfg
index 872e0ad6f44d8092ce1da6ac030a949dc4dba5d5..7368e5654204ad600192eff3defdd5f96e986ce5 100644
--- a/examples/SmallCosmoVolume/stf_input_6dfof_dmonly_sub.cfg
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_DM/stf_input_6dfof_dmonly_sub.cfg
@@ -104,7 +104,7 @@ Allowed_kinetic_potential_ratio=0.2
 #run unbinding of field structures, aka halos
 Bound_halos=0
 #simple Plummer softening length when calculating gravitational energy. If cosmological simulation with period, is fraction of interparticle spacing
-Softening_length=0.
+Softening_length=0.04
 #don't keep background potential when unbinding
 Keep_background_potential=0
 
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/README b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/README
new file mode 100644
index 0000000000000000000000000000000000000000..76eab82c0c434ceab334f82be8bd52e0d2dd4d08
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/README
@@ -0,0 +1,16 @@
+Small LCDM cosmological simulation generated by C. Power. Cosmology
+is WMAP9 and the box is 100Mpc/h in size with 64^3 particles.
+We use a softening length of 1/25th of the mean inter-particle separation.
+
+The ICs have been generated to run with Gadget-2 so we need to switch
+on the options to cancel the h-factors and a-factors at reading time.
+We generate gas from the ICs using SWIFT's internal mechanism and set the
+temperature to the expected gas temperature at this redshift.
+
+This example runs with Hydrodynamics and a halo finder, the halo finder 
+is run while running the simulation. At the end it is possible to 
+calculate the halo mass function of the halos in the simulated 
+volume, this is done by using haloevol.py.
+
+MD5 checksum of the ICs:
+08736c3101fd738e22f5159f78e6022b  small_cosmo_volume.hdf5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getHMF.py b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getHMF.py
new file mode 100755
index 0000000000000000000000000000000000000000..e56df323b004dfcfcd2c75c427fa6f3ecbe37a29
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getHMF.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+from hmf import MassFunction
+import hmf
+from astropy.cosmology import FlatLambdaCDM
+
+
+
+def getHMFz(z, H0=70.3, Om0=0.276, Ob0=0.0455, Tcmb0=2.725, Mmin=1e10, Mmax=1e15):
+    """ Fast function to call the HMF from hmf, this function only has 
+        7 variables and will return the dn/d(log10 M) and M array.
+        z: redshift
+        H0: Hubble constant
+        Om0: Matter density
+        Ob0: Baryon density
+        Tcmb0: CMB temperature at z=0
+        Mmin: minimum mass (solar masses)
+        Mmax: Maximum mass (solar masses) 
+    """
+    new_model = FlatLambdaCDM(H0=H0, Om0=Om0, Ob0=Ob0, Tcmb0=Tcmb0)
+    hmff = MassFunction(
+        cosmo_model=new_model,
+        Mmax=np.log10(Mmax),
+        Mmin=np.log10(Mmin),
+        z=z,
+        hmf_model="ST",
+    )
+    return hmff.m, hmff.dndlog10m
+
+
+def getHMFztinker(z, H0=70.3, Om0=0.276, Ob0=0.0455, Tcmb0=2.725, Mmin=1e10, Mmax=1e15):
+    """ Fast function to call the HMF from hmf, this function only has 
+        6 variables and will return the dn/d(log10 M) and M array.
+        H0: Hubble constant
+        Om0: Matter density
+        Ob0: Baryon density
+        Tcmb0: CMB temperature at z=0
+        Mmin: minimum mass (solar masses)
+        Mmax: Maximum mass (solar masses) 
+    """
+    new_model = FlatLambdaCDM(H0=H0, Om0=Om0, Ob0=Ob0, Tcmb0=Tcmb0)
+    hmff = MassFunction(
+        cosmo_model=new_model, Mmax=np.log10(Mmax), Mmin=np.log10(Mmin), z=z
+    )
+    return hmff.m, hmff.dndlog10m
+
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getIC.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3b8136cc5aca00a25792655c6c505cfeeb0f2bc9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/getIC.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/small_cosmo_volume.hdf5
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/haloevol.py b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/haloevol.py
new file mode 100755
index 0000000000000000000000000000000000000000..94e206cdf686ef5d2d3676d6fc36d6dfe8aea558
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/haloevol.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+###############################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+import numpy as np
+import h5py
+import matplotlib.pyplot as plt
+from getHMF import getHMFz, getHMFztinker
+
+dlogm = 0.2
+bins = 10 ** (np.arange(12, 15.2, dlogm))
+V = 142.0 ** 3
+
+itervalues = np.array([175, 185, 192, 198])
+
+for j in itervalues:
+    # Load the data
+    g = h5py.File("stf_%04d.VELOCIraptor.properties" % j, "r")
+    mass = g["Mass_200crit"][:] * 1e10  # convert to the correct unit
+    binnedmass, massrange = np.histogram(mass, bins=bins)
+
+    massnlarger = np.zeros(len(binnedmass))
+    for i in range(0, len(massnlarger)):
+        massnlarger[i] = np.sum(binnedmass[i:])
+
+    f = h5py.File("snap_%04d.hdf5" % (j + 1))
+    cosmo = f["Cosmology"]
+    redshift = cosmo.attrs["Redshift"][0]
+    a = cosmo.attrs["Scale-factor"][0]
+
+    # Determine the HMF
+    errormassn = massnlarger ** 0.5
+    numbden = massnlarger / V / a ** 3
+    numbdenerr = errormassn / V / a ** 3
+    massplot = (massrange[0:15] + massrange[1:16]) / 2
+    dernumbden = -np.diff(numbden) / np.diff(np.log10(massplot))
+    dererr = 2 ** 0.5 / dlogm * (numbdenerr[0:14] + numbdenerr[1:15]) / 2
+
+    plt.plot(
+        (massplot[0:14] + massplot[1:15]) / 2, dernumbden, label="SWIFT - SPH $64^3$"
+    )
+    plt.fill_between(
+        (massplot[0:14] + massplot[1:15]) / 2,
+        dernumbden - dererr,
+        dernumbden + dererr,
+        alpha=0.4,
+    )
+    plt.xscale("log")
+    plt.ylim(1e-6, 1e-1)
+    plt.xlim(10 ** 11, 10 ** 15.5)
+
+    xplace = 10 ** 14.5
+    plt.text(xplace, 10 ** -2.3, "$\Omega_m=0.276$")
+    plt.text(xplace, 10 ** -2.6, "$\Omega_b=0.0455$")
+    plt.text(xplace, 10 ** -2.9, "$\Omega_\Lambda=0.724$")
+    plt.text(xplace, 10 ** -3.2, "$h=0.703$")
+    plt.text(xplace, 10 ** -3.5, "$z=%2.2f$" % redshift)
+
+    m, dndlogm = getHMFz(redshift)
+    plt.plot(m / 0.7, dndlogm * 0.7 ** 3, label="Sheth et al. 2001")
+
+    m, dndlogm = getHMFztinker(redshift)
+    plt.plot(m / 0.7, dndlogm * 0.7 ** 3, label="Tinker et al. 2008")
+
+    plt.xlabel("M${}_{200}$ ($M_\odot$)")
+    plt.ylabel("dn/d($\log$10(M${}_{200}$) ($Mpc^{-3}$)")
+    plt.axvline(x=32 * 3.5e11, linestyle="--", color="k")
+    plt.yscale("log")
+    plt.legend()
+    plt.savefig("./HMF_%04d.png" % j)
+    plt.close()
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/run.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b89662ed7ae621c6fbc29ccfb566fa61367693a9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/run.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+ # Generate the initial conditions if they are not present.
+if [ ! -e small_cosmo_volume.hdf5 ]
+then
+    echo "Fetching initial conditions for the small cosmological volume example..."
+    ./getIC.sh
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --self-gravity --velociraptor --threads=8 small_cosmo_volume.yml 2>&1 | tee output.log
+
+echo "Make a plot of the HMF"
+if command -v python3 &>/dev/null; then
+    python3 haloevol.py
+else
+    python haloevol.py
+fi
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/small_cosmo_volume.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/small_cosmo_volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..15007ca8d39a328166a208a2d4cbbc6aea580009
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/small_cosmo_volume.yml
@@ -0,0 +1,69 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+Cosmology:                      # WMAP9 cosmology
+  Omega_m:        0.276
+  Omega_lambda:   0.724
+  Omega_b:        0.0455
+  h:              0.703
+  a_begin:        0.019607843	# z_ini = 50.
+  a_end:          1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:          0.025         
+  theta:        0.3           
+  comoving_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_softening: 0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  mesh_side_length:       64
+
+# Parameters of the hydro scheme
+SPH:
+  resolution_eta:      1.2348   # "48 Ngb" with the cubic spline kernel
+  h_min_ratio:         0.1
+  CFL_condition:       0.1
+  initial_temperature: 7075.    # (1 + z_ini)^2 * 2.72K
+  minimal_temperature: 100.
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            snap
+  delta_time:          1.05
+  scale_factor_first:  0.02
+  invoke_stf:          1
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  small_cosmo_volume.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
+  generate_gas_in_ics:         1     # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths:   1     # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Structure finding options (requires velociraptor)
+StructureFinding:
+  config_file_name:     stfconfig_input.cfg
+  basename:             ./stf
+  scale_factor_first:   0.02
+  delta_time:           1.02
+ 
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/stfconfig_input.cfg b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/stfconfig_input.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..4306bae3d23aab924ce8fa3a5c50e839823fbc2f
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_VELOCIraptor/stfconfig_input.cfg
@@ -0,0 +1,165 @@
+#suggested configuration file for hydro run and subhalo (and galaxy ie: associated baryons) catalog
+
+################################
+#input related
+################################
+#input is from a cosmological so can use parameters like box size, h, Omega_m to calculate length and density scales
+Cosmological_input=1
+
+#sets the total buffer size in bytes used to store temporary particle information
+#of mpi read threads before they are broadcast to the appropriate waiting non-read threads
+#if not set, default value is equivalent to 1e6 particles per mpi process, quite large
+#but significantly minimises the number of send/receives
+#in this example the buffer size is roughly that for a send/receive of 10000 particles
+#for 100 mpi processes
+MPI_particle_total_buf_size=100000000
+
+#gadget input related
+#NSPH_extra_blocks=0 #read extra sph blocks
+#NStar_extra_blocks=0 #read extra star blocks
+#NBH_extra_blocks=0 #read extra black hole blocks
+
+#HDF related input
+#Set the HDF name convention, 0 is illustris, 1 is gadget x, 2 is Eagle, 3 is gizmo
+HDF_name_convention=0
+#whether star particles are present in the input
+Input_includes_star_particle=1
+#bhs present
+Input_includes_bh_particle=1
+#no wind present
+Input_includes_wind_particle=0
+#no tracers present
+Input_includes_tracer_particle=0
+#no low res/extra dm particle types present
+Input_includes_extradm_particle=0
+
+################################
+#unit options, should always be provided
+################################
+#EDIT THIS SECTION!!!!
+#conversion of output length units to kpc
+Length_unit_to_kpc=1.0
+#conversion of output velocity units to km/s
+Velocity_to_kms=1.0
+#conversion of output mass units to solar masses
+Mass_to_solarmass=1.0
+#units conversion from input to desired internal unit
+Length_unit=1.0 #default code unit,
+Velocity_unit=1.0 #default velocity unit,
+Mass_unit=1.0 #default mass unit,
+Gravity=43.0211349 #for 1e10 Msun, km/s and Mpc
+Hubble_unit=100.0 # assuming units are km/s and Mpc, then value of Hubble in km/s/Mpc
+
+################################
+#search related options
+################################
+
+#how to search a simulation
+Particle_search_type=1 #search all particles, see allvars for other types
+#for baryon search
+Baryon_searchflag=2 #if 1 search for baryons separately using phase-space search when identifying substructures, 2 allows special treatment in field FOF linking and phase-space substructure search, 0 treat the same as dark matter particles
+#for search for substruture
+Search_for_substructure=1 #if 0, end search once field objects are found
+#also useful for zoom simulations or simulations of individual objects, setting this flag means no field structure search is run
+Singlehalo_search=0 #if file is single halo in which one wishes to search for substructure
+#additional option for field haloes
+Keep_FOF=0 #if field 6DFOF search is done, allows to keep structures found in 3DFOF (can be interpreted as the inter halo stellar mass when only stellar search is used).\n
+
+#minimum size for structures
+Minimum_size=20 #min 20 particles
+Minimum_halo_size=-1 #if field halos have different minimum sizes, otherwise set to -1.
+
+#for field fof halo search
+FoF_Field_search_type=3 #5 3DFOF search for field halos, 4 for 6DFOF clean up of field halos, 3 for 6DFOF with velocity scale distinct for each halo
+Halo_linking_length_factor=2.0 #factor by which Physical_linking_length is changed when searching for field halos. Typical values are ~2 when using iterative substructure search.
+Halo_velocity_linking_length_factor=5.0 #for 6d fof halo search increase ellv from substructure search
+
+#for mean field estimates and local velocity density distribution function estimator related quantities, rarely need to change this
+Cell_fraction = 0.01 #fraction of field fof halo used to determine mean velocity distribution function. Typical values are ~0.005-0.02
+Grid_type=1 #normal entropy based grid, shouldn't have to change
+Nsearch_velocity=32 #number of velocity neighbours used to calculate local velocity distribution function. Typical values are ~32
+Nsearch_physical=256 #number of physical neighbours from which the nearest velocity neighbour set is based. Typical values are 128-512
+
+#for substructure search, rarely ever need to change this
+FoF_search_type=1 #default phase-space FOF search. Don't really need to change
+Iterative_searchflag=1 #iterative substructure search, for substructure find initial candidate substructures with smaller linking lengths then expand search region
+Outlier_threshold=2.5 #outlier threshold for a particle to be considered residing in substructure, that is how dynamically distinct a particle is. Typical values are >2
+Velocity_ratio=2.0 #ratio of speeds used in phase-space FOF
+Velocity_opening_angle=0.10 #angle between velocities. 18 degrees here, typical values are ~10-30
+Physical_linking_length=0.10 #physical linking length. IF reading periodic volumes in gadget/hdf/ramses, in units of the effective inter-particle spacing. Otherwise in user defined code units. Here set to 0.10 as iterative flag one, values of 0.1-0.3 are typical.
+Velocity_linking_length=0.20 #where scaled by structure dispersion
+Significance_level=1.0 #how significant a substructure is relative to Poisson noise. Values >= 1 are fine.
+
+#for iterative substructure search, rarely ever need to change this
+Iterative_threshold_factor=1.0 #change in threshold value when using iterative search. Here no increase in threshold if iterative or not
+Iterative_linking_length_factor=2.0 #increase in final linking final iterative substructure search will be sqrt(2.25)*this factor
+Iterative_Vratio_factor=1.0 #change in Vratio when using iterative search. no change in vratio
+Iterative_ThetaOp_factor=1.0 #change in velocity opening angle. no change in velocity opening angle
+
+#for checking for halo merger remnants, which are defined as large, well separated phase-space density maxima
+Halo_core_search=2 # searches for separate 6dfof cores in field haloes, and then more than just flags halo as merging, assigns particles to each merging "halo". 2 is full separation, 1 is flagging, 0 is off
+#if searching for cores, linking lengths. likely does not need to change much
+Use_adaptive_core_search=0 #calculate dispersions in configuration & vel space to determine linking lengths
+Use_phase_tensor_core_growth=2 #use full stepped phase-space tensor assignment
+Halo_core_ellx_fac=0.7 #how linking lengths are changed when searching for local 6DFOF cores,
+Halo_core_ellv_fac=2.0 #how velocity lengths based on dispersions are changed when searching for local 6DFOF cores
+Halo_core_ncellfac=0.005 #fraction of total halo particle number setting min size of a local 6DFOF core
+Halo_core_num_loops=8 #number of loops to iteratively search for cores
+Halo_core_loop_ellx_fac=0.75 #how much to change the configuration space linking per iteration
+Halo_core_loop_ellv_fac=1.0 #how much to change the velocity space linking per iteration
+Halo_core_loop_elln_fac=1.2 #how much to change the min number of particles per iteration
+Halo_core_phase_significance=2.0 #how significant a core must be in terms of dispersions (sigma) significance
+
+################################
+#Unbinding options (VELOCIraptor is able to accurately identify tidal debris so particles need not be bound to a structure)
+################################
+
+#unbinding related items
+Unbind_flag=1 #run unbinding
+#alpha factor used to determine whether particle is "bound" alaph*T+W<0. For standard subhalo catalogues use >0.9 but if interested in tidal debris 0.2-0.5
+Allowed_kinetic_potential_ratio=0.95
+#run unbinding of field structures, aka halos
+Bound_halos=0
+#simple Plummer softening length when calculating gravitational energy. If cosmological simulation with period, is fraction of interparticle spacing
+Softening_length=0.
+#don't keep background potential when unbinding
+Keep_background_potential=0
+
+################################
+#Cosmological parameters
+#this is typically overwritten by information in the gadget/hdf header if those input file types are read
+################################
+h_val=1.0
+Omega_m=0.3
+Omega_Lambda=0.7
+Critical_density=1.0
+Virial_density=200 #so-called virial overdensity value
+Omega_b=0. #no baryons
+
+################################
+#Calculation of properties related options
+################################
+#when calculating properties, for field objects calculate inclusive masses
+Inclusive_halo_masses=1 #calculate inclusive masses
+#ensures that output is comoving distances per little h
+Comoving_units=0
+
+################################
+#output related
+################################
+
+Write_group_array_file=0 #write a group array file
+Separate_output_files=0 #separate output into field and substructure files similar to subfind
+Binary_output=2 #binary output 1, ascii 0, and HDF 2
+
+#halo ids are adjusted by this value * 1000000000000 (or 1000000 if code compiled with the LONGINTS option turned off)
+#to ensure that halo ids are temporally unique. So if you had 100 snapshots, for snap 100 set this to 100 and 100*1000000000000 will
+#be added to the halo id as set for this snapshot, so halo 1 becomes halo 100*1000000000000+1 and halo 1 of snap 0 would just have ID=1
+
+#ALTER THIS as part of a script to get temporally unique ids
+Snapshot_value=SNAP
+
+################################
+#other options
+################################
+Verbose=0 #how talkative do you want the code to be, 0 not much, 1 a lot, 2 chatterbox
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/README b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/README
new file mode 100644
index 0000000000000000000000000000000000000000..357250f79e5e2b5d5408b3685c95767838f4bb70
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/README
@@ -0,0 +1,22 @@
+Small LCDM cosmological simulation generated by C. Power. Cosmology
+is WMAP9 and the box is 100Mpc/h in size with 64^3 particles.
+We use a softening length of 1/25th of the mean inter-particle separation.
+
+The ICs have been generated to run with Gadget-2 so we need to switch
+on the options to cancel the h-factors and a-factors at reading time.
+We generate gas from the ICs using SWIFT's internal mechanism and set the
+temperature to the expected gas temperature at this redshift.
+
+This example runs with cooling switch on. Depending on the cooling
+model chosen at the time SWIFT was configured, the answer will be
+different. Interesting cases to compare to the no-cooling case are
+a constant cooling rate or Compton cooling.
+
+The 'plotTempEvolution.py' script plots the temperature evolution of
+the gas in the simulated volume.
+
+The 'plotRhoT.py' script plots the phase-space diagram for a given
+snapshot.
+
+MD5 checksum of the ICs:
+08736c3101fd738e22f5159f78e6022b  small_cosmo_volume.hdf5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getCoolingTables.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getCoolingTables.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ecd581fd3dd44a13af1218d7dee6af72a25a324a
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getCoolingTables.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/CoolingTables/EAGLE/coolingtables.tar.gz
+tar -xvzf coolingtables.tar.gz
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getIC.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3b8136cc5aca00a25792655c6c505cfeeb0f2bc9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/getIC.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/small_cosmo_volume.hdf5
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotRhoT.py b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotRhoT.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ba8ad66daca1d9614be8917a77407dd99209dea
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotRhoT.py
@@ -0,0 +1,163 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+import matplotlib
+
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import os.path
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 9,
+    "legend.fontsize": 9,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (3.15, 3.15),
+    "figure.subplot.left": 0.15,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.13,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.15,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 2.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+snap = int(sys.argv[1])
+
+# Read the simulation data
+sim = h5py.File("snap_%04d.hdf5" % snap, "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+z = sim["/Cosmology"].attrs["Redshift"][0]
+a = sim["/Cosmology"].attrs["Scale-factor"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs[
+    "Hydrogen ionization transition temperature"
+][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4.0 / (8.0 - 5.0 * (1.0 - H_frac))
+    else:
+        return 4.0 / (1.0 + 3.0 * H_frac)
+
+
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.0) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = T_over_mu > (T_trans + 1) / mu(T_trans + 1, H_frac, T_trans)
+    if np.sum(mask_ionized) > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans * 10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = T_over_mu < (T_trans - 1) / mu((T_trans - 1), H_frac, T_trans)
+    if np.sum(mask_neutral) > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+
+    return ret
+
+
+rho = sim["/PartType0/Density"][:]
+u = sim["/PartType0/InternalEnergy"][:]
+
+# Compute the temperature
+u *= unit_length_in_si ** 2 / unit_time_in_si ** 2
+u /= a ** (3 * (gas_gamma - 1.0))
+Temp = T(u)
+
+# Compute the physical density
+rho *= unit_mass_in_cgs / unit_length_in_cgs ** 3
+rho /= a ** 3
+rho /= mH_in_kg
+
+# Life is better in log-space
+log_T = np.log10(Temp)
+log_rho = np.log10(rho)
+
+
+# Make a 2D histogram
+log_rho_min = -6
+log_rho_max = 3
+log_T_min = 1
+log_T_max = 8
+
+bins_x = np.linspace(log_rho_min, log_rho_max, 54)
+bins_y = np.linspace(log_T_min, log_T_max, 54)
+H, _, _ = histogram2d(log_rho, log_T, bins=[bins_x, bins_y], normed=True)
+
+
+# Plot the interesting quantities
+figure()
+
+pcolormesh(bins_x, bins_y, np.log10(H).T)
+
+text(-5, 8.0, "$z=%.2f$" % z)
+
+xticks(
+    [-5, -4, -3, -2, -1, 0, 1, 2, 3],
+    ["", "$10^{-4}$", "", "$10^{-2}$", "", "$10^0$", "", "$10^2$", ""],
+)
+yticks(
+    [2, 3, 4, 5, 6, 7, 8], ["$10^{2}$", "", "$10^{4}$", "", "$10^{6}$", "", "$10^8$"]
+)
+xlabel("${\\rm Density}~n_{\\rm H}~[{\\rm cm^{-3}}]$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=2)
+xlim(-5.2, 3.2)
+ylim(1, 8.5)
+
+savefig("rhoT_%04d.png" % snap, dpi=200)
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotTempEvolution.py b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotTempEvolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3458ac1598e5657f3f597dfb10b36a7a641e68f
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/plotTempEvolution.py
@@ -0,0 +1,195 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+# Number of snapshots generated
+n_snapshots = 200
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import os.path
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 9,
+'legend.fontsize': 9,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (3.15,3.15),
+'figure.subplot.left'    : 0.14,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.12,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 2.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+# Read the simulation data
+sim = h5py.File("snap_0000.hdf5", "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs["Hydrogen ionization transition temperature"][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+cooling_model = sim["/SubgridScheme"].attrs["Cooling Model"]
+
+if cooling_model == "Constant Lambda":
+    Lambda = sim["/SubgridScheme"].attrs["Lambda/n_H^2 [cgs]"][0]   
+    
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4. / (8. - 5. * (1. - H_frac))
+    else:
+        return 4. / (1. + 3. * H_frac)
+    
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = (T_over_mu > (T_trans+1) / mu(T_trans+1, H_frac, T_trans))
+    if np.sum(mask_ionized)  > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans*10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = (T_over_mu < (T_trans-1) / mu((T_trans-1), H_frac, T_trans))
+    if np.sum(mask_neutral)  > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+        
+    return ret
+
+z = np.zeros(n_snapshots)
+a = np.zeros(n_snapshots)
+T_mean = np.zeros(n_snapshots)
+T_std = np.zeros(n_snapshots)
+T_log_mean = np.zeros(n_snapshots)
+T_log_std = np.zeros(n_snapshots)
+T_median = np.zeros(n_snapshots)
+T_min = np.zeros(n_snapshots)
+T_max = np.zeros(n_snapshots)
+
+# Loop over all the snapshots
+for i in range(n_snapshots):
+    sim = h5py.File("snap_%04d.hdf5"%i, "r")
+
+    z[i] = sim["/Cosmology"].attrs["Redshift"][0]
+    a[i] = sim["/Cosmology"].attrs["Scale-factor"][0]
+
+    u = sim["/PartType0/InternalEnergy"][:]
+
+    # Compute the temperature
+    u *= (unit_length_in_si**2 / unit_time_in_si**2)
+    u /= a[i]**(3 * (gas_gamma - 1.))
+    Temp = T(u)
+
+    # Gather statistics
+    T_median[i] = np.median(Temp)
+    T_mean[i] = Temp.mean()
+    T_std[i] = Temp.std()
+    T_log_mean[i] = np.log10(Temp).mean()
+    T_log_std[i] = np.log10(Temp).std()
+    T_min[i] = Temp.min()
+    T_max[i] = Temp.max()
+
+# CMB evolution
+a_evol = np.logspace(-3, 0, 60)
+T_cmb = (1. / a_evol)**2 * 2.72
+
+# Plot the interesting quantities
+figure()
+subplot(111, xscale="log", yscale="log")
+
+fill_between(a, T_mean-T_std, T_mean+T_std, color='C0', alpha=0.1)
+plot(a, T_max, ls='-.', color='C0', lw=1., label="${\\rm max}~T$")
+plot(a, T_min, ls=':', color='C0', lw=1., label="${\\rm min}~T$")
+plot(a, T_mean, color='C0', label="${\\rm mean}~T$", lw=1.5)
+fill_between(a, 10**(T_log_mean-T_log_std), 10**(T_log_mean+T_log_std), color='C1', alpha=0.1)
+plot(a, 10**T_log_mean, color='C1', label="${\\rm mean}~{\\rm log} T$", lw=1.5)
+plot(a, T_median, color='C2', label="${\\rm median}~T$", lw=1.5)
+
+legend(loc="upper left", frameon=False, handlelength=1.5)
+
+# Cooling model
+if cooling_model == "Constant Lambda":
+    text(1e-2, 6e4, "$\Lambda_{\\rm const}/n_{\\rm H}^2 = %.1f\\times10^{%d}~[\\rm{cgs}]$"%(Lambda/10.**(int(log10(Lambda))), log10(Lambda)), fontsize=7)
+elif cooling_model == "EAGLE":
+    text(1e-2, 6e4, "EAGLE (Wiersma et al. 2009)")
+elif cooling_model == b"Grackle":
+    text(1e-2, 6e4, "Grackle (Smith et al. 2016)")
+else:
+    text(1e-2, 6e4, "No cooling")
+    
+# Expected lines
+plot([1e-10, 1e10], [H_transition_temp, H_transition_temp], 'k--', lw=0.5, alpha=0.7)
+text(2.5e-2, H_transition_temp*1.07, "$T_{\\rm HII\\rightarrow HI}$", va="bottom", alpha=0.7, fontsize=8)
+plot([1e-10, 1e10], [T_minimal, T_minimal], 'k--', lw=0.5, alpha=0.7)
+text(1e-2, T_minimal*0.8, "$T_{\\rm min}$", va="top", alpha=0.7, fontsize=8)
+plot(a_evol, T_cmb, 'k--', lw=0.5, alpha=0.7)
+text(a_evol[20], T_cmb[20]*0.55, "$(1+z)^2\\times T_{\\rm CMB,0}$", rotation=-34, alpha=0.7, fontsize=8, va="top", bbox=dict(facecolor='w', edgecolor='none', pad=1.0, alpha=0.9))
+
+
+redshift_ticks = np.array([0., 1., 2., 5., 10., 20., 50., 100.])
+redshift_labels = ["$0$", "$1$", "$2$", "$5$", "$10$", "$20$", "$50$", "$100$"]
+a_ticks = 1. / (redshift_ticks + 1.)
+
+xticks(a_ticks, redshift_labels)
+minorticks_off()
+
+xlabel("${\\rm Redshift}~z$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=0)
+xlim(9e-3, 1.1)
+ylim(5, 2.5e7)
+
+savefig("Temperature_evolution.png", dpi=200)
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/run.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a7ae9dab54975efaf523de323a127b8134663544
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/run.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e small_cosmo_volume.hdf5 ]
+then
+    echo "Fetching initial conditions for the small cosmological volume example..."
+    ./getIC.sh
+fi
+
+if [ ! -e CloudyData_UVB=HM2012.h5 ]
+then
+    ../../Cooling/getCoolingTable.sh 
+fi
+
+if [ ! -e coolingtables ]
+then
+    echo "Fetching cooling tables for the small cosmological volume example..."
+    ./getCoolingTables.sh
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --self-gravity --cooling --threads=8 small_cosmo_volume.yml 2>&1 | tee output.log
+
+# Plot the temperature evolution
+python plotTempEvolution.py
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..96f10465410ac120b30904a8655da4d8133d09bd
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_cooling/small_cosmo_volume.yml
@@ -0,0 +1,100 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+Cosmology:                      # WMAP9 cosmology
+  Omega_m:        0.276
+  Omega_lambda:   0.724
+  Omega_b:        0.0455
+  h:              0.703
+  a_begin:        0.019607843	# z_ini = 50.
+  a_end:          1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-8
+  dt_max:     1e-2 
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:          0.025         
+  theta:        0.3           
+  comoving_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_softening: 0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  mesh_side_length:       64
+
+# Parameters of the hydro scheme
+SPH:
+  resolution_eta:      1.2348   # "48 Ngb" with the cubic spline kernel
+  h_min_ratio:         0.1
+  CFL_condition:       0.1
+  initial_temperature: 7075.    # (1 + z_ini)^2 * 2.72K
+  minimal_temperature: 100.
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            snap
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  small_cosmo_volume.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
+  generate_gas_in_ics:         1    # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths:   1    # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Constant lambda cooling function
+LambdaCooling:
+  lambda_nH2_cgs:              1e-26 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
+
+# EAGLE cooling function
+EAGLECooling:
+  dir_name:                 ./coolingtables/
+  H_reion_z:               11.5
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_eV_p_H:         2.0
+
+# Impose primordial metallicity
+EAGLEChemistry:
+  init_abundance_metal:     0.0
+  init_abundance_Hydrogen:  0.752
+  init_abundance_Helium:    0.248
+  init_abundance_Carbon:    0.0
+  init_abundance_Nitrogen:  0.0
+  init_abundance_Oxygen:    0.0
+  init_abundance_Neon:      0.0
+  init_abundance_Magnesium: 0.0
+  init_abundance_Silicon:   0.0
+  init_abundance_Iron:      0.0
+
+# Cooling with Grackle 3.0
+GrackleCooling:
+  CloudyTable: CloudyData_UVB=HM2012.h5 # Name of the Cloudy Table (available on the grackle bitbucket repository)
+  WithUVbackground: 1                   # Enable or not the UV background
+  Redshift: -1                           # Redshift to use (-1 means time based redshift)
+  WithMetalCooling: 1                   # Enable or not the metal cooling
+  ProvideVolumetricHeatingRates: 0      # (optional) User provide volumetric heating rates
+  ProvideSpecificHeatingRates: 0        # (optional) User provide specific heating rates
+  SelfShieldingMethod: 0                # (optional) Grackle (<= 3) or Gear self shielding method
+  MaxSteps: 10000                       # (optional) Max number of step when computing the initial composition
+  ConvergenceLimit: 1e-2                # (optional) Convergence threshold (relative) for initial composition
+
+GearChemistry:
+  InitialMetallicity: 0.01295
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/README b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/README
new file mode 100644
index 0000000000000000000000000000000000000000..a0abad5f814f87133dccc31d414bdc546609df88
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/README
@@ -0,0 +1,14 @@
+Small LCDM cosmological simulation generated by C. Power. Cosmology
+is WMAP9 and the box is 100Mpc/h in size with 64^3 particles.
+We use a softening length of 1/25th of the mean inter-particle separation.
+
+The ICs have been generated to run with Gadget-2 so we need to switch
+on the options to cancel the h-factors and a-factors at reading time.
+We generate gas from the ICs using SWIFT's internal mechanism and set the
+temperature to the expected gas temperature at this redshift.
+
+The 'plotTempEvolution.py' plots the temperature evolution of the gas
+in the simulated volume.
+
+MD5 checksum of the ICs:
+08736c3101fd738e22f5159f78e6022b  small_cosmo_volume.hdf5
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/getIC.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/getIC.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3b8136cc5aca00a25792655c6c505cfeeb0f2bc9
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/getIC.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/small_cosmo_volume.hdf5
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/plotTempEvolution.py b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/plotTempEvolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa6c5df5fe5ff5c7d0944a45bb11344f70c57844
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/plotTempEvolution.py
@@ -0,0 +1,182 @@
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+# Computes the temperature evolution of the gas in a cosmological box
+
+# Physical constants needed for internal energy to temperature conversion
+k_in_J_K = 1.38064852e-23
+mH_in_kg = 1.6737236e-27
+
+# Number of snapshots generated
+n_snapshots = 200
+
+import matplotlib
+matplotlib.use("Agg")
+from pylab import *
+import h5py
+import os.path
+
+# Plot parameters
+params = {'axes.labelsize': 10,
+'axes.titlesize': 10,
+'font.size': 9,
+'legend.fontsize': 9,
+'xtick.labelsize': 10,
+'ytick.labelsize': 10,
+'text.usetex': True,
+ 'figure.figsize' : (3.15,3.15),
+'figure.subplot.left'    : 0.14,
+'figure.subplot.right'   : 0.99,
+'figure.subplot.bottom'  : 0.12,
+'figure.subplot.top'     : 0.99,
+'figure.subplot.wspace'  : 0.15,
+'figure.subplot.hspace'  : 0.12,
+'lines.markersize' : 6,
+'lines.linewidth' : 2.,
+'text.latex.unicode': True
+}
+rcParams.update(params)
+rc('font',**{'family':'sans-serif','sans-serif':['Times']})
+
+# Read the simulation data
+sim = h5py.File("snap_0000.hdf5", "r")
+boxSize = sim["/Header"].attrs["BoxSize"][0]
+time = sim["/Header"].attrs["Time"][0]
+scheme = sim["/HydroScheme"].attrs["Scheme"][0]
+kernel = sim["/HydroScheme"].attrs["Kernel function"][0]
+neighbours = sim["/HydroScheme"].attrs["Kernel target N_ngb"][0]
+eta = sim["/HydroScheme"].attrs["Kernel eta"][0]
+alpha = sim["/HydroScheme"].attrs["Alpha viscosity"][0]
+H_mass_fraction = sim["/HydroScheme"].attrs["Hydrogen mass fraction"][0]
+H_transition_temp = sim["/HydroScheme"].attrs["Hydrogen ionization transition temperature"][0]
+T_initial = sim["/HydroScheme"].attrs["Initial temperature"][0]
+T_minimal = sim["/HydroScheme"].attrs["Minimal temperature"][0]
+git = sim["Code"].attrs["Git Revision"]
+
+# Cosmological parameters
+H_0 = sim["/Cosmology"].attrs["H0 [internal units]"][0]
+gas_gamma = sim["/HydroScheme"].attrs["Adiabatic index"][0]
+
+unit_length_in_cgs = sim["/Units"].attrs["Unit length in cgs (U_L)"]
+unit_mass_in_cgs = sim["/Units"].attrs["Unit mass in cgs (U_M)"]
+unit_time_in_cgs = sim["/Units"].attrs["Unit time in cgs (U_t)"]
+
+unit_length_in_si = 0.01 * unit_length_in_cgs
+unit_mass_in_si = 0.001 * unit_mass_in_cgs
+unit_time_in_si = unit_time_in_cgs
+
+# Primordial mean molecular weight as a function of temperature
+def mu(T, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    if T > T_trans:
+        return 4. / (8. - 5. * (1. - H_frac))
+    else:
+        return 4. / (1. + 3. * H_frac)
+    
+# Temperature of some primordial gas with a given internal energy
+def T(u, H_frac=H_mass_fraction, T_trans=H_transition_temp):
+    T_over_mu = (gas_gamma - 1.) * u * mH_in_kg / k_in_J_K
+    ret = np.ones(np.size(u)) * T_trans
+
+    # Enough energy to be ionized?
+    mask_ionized = (T_over_mu > (T_trans+1) / mu(T_trans+1, H_frac, T_trans))
+    if np.sum(mask_ionized)  > 0:
+        ret[mask_ionized] = T_over_mu[mask_ionized] * mu(T_trans*10, H_frac, T_trans)
+
+    # Neutral gas?
+    mask_neutral = (T_over_mu < (T_trans-1) / mu((T_trans-1), H_frac, T_trans))
+    if np.sum(mask_neutral)  > 0:
+        ret[mask_neutral] = T_over_mu[mask_neutral] * mu(0, H_frac, T_trans)
+        
+    return ret
+
+
+z = np.zeros(n_snapshots)
+a = np.zeros(n_snapshots)
+T_mean = np.zeros(n_snapshots)
+T_std = np.zeros(n_snapshots)
+T_log_mean = np.zeros(n_snapshots)
+T_log_std = np.zeros(n_snapshots)
+T_median = np.zeros(n_snapshots)
+T_min = np.zeros(n_snapshots)
+T_max = np.zeros(n_snapshots)
+
+# Loop over all the snapshots
+for i in range(n_snapshots):
+    sim = h5py.File("snap_%04d.hdf5"%i, "r")
+
+    z[i] = sim["/Cosmology"].attrs["Redshift"][0]
+    a[i] = sim["/Cosmology"].attrs["Scale-factor"][0]
+
+    u = sim["/PartType0/InternalEnergy"][:]
+
+    # Compute the temperature
+    u *= (unit_length_in_si**2 / unit_time_in_si**2)
+    u /= a[i]**(3 * (gas_gamma - 1.))
+    Temp = T(u)
+
+    # Gather statistics
+    T_median[i] = np.median(Temp)
+    T_mean[i] = Temp.mean()
+    T_std[i] = Temp.std()
+    T_log_mean[i] = np.log10(Temp).mean()
+    T_log_std[i] = np.log10(Temp).std()
+    T_min[i] = Temp.min()
+    T_max[i] = Temp.max()
+
+# CMB evolution
+a_evol = np.logspace(-3, 0, 60)
+T_cmb = (1. / a_evol)**2 * 2.72
+
+# Plot the interesting quantities
+figure()
+subplot(111, xscale="log", yscale="log")
+
+fill_between(a, T_mean-T_std, T_mean+T_std, color='C0', alpha=0.1)
+plot(a, T_max, ls='-.', color='C0', lw=1., label="${\\rm max}~T$")
+plot(a, T_min, ls=':', color='C0', lw=1., label="${\\rm min}~T$")
+plot(a, T_mean, color='C0', label="${\\rm mean}~T$", lw=1.5)
+fill_between(a, 10**(T_log_mean-T_log_std), 10**(T_log_mean+T_log_std), color='C1', alpha=0.1)
+plot(a, 10**T_log_mean, color='C1', label="${\\rm mean}~{\\rm log} T$", lw=1.5)
+plot(a, T_median, color='C2', label="${\\rm median}~T$", lw=1.5)
+
+legend(loc="upper left", frameon=False, handlelength=1.5)
+
+# Expected lines
+plot([1e-10, 1e10], [H_transition_temp, H_transition_temp], 'k--', lw=0.5, alpha=0.7)
+text(2.5e-2, H_transition_temp*1.07, "$T_{\\rm HII\\rightarrow HI}$", va="bottom", alpha=0.7, fontsize=8)
+plot([1e-10, 1e10], [T_minimal, T_minimal], 'k--', lw=0.5, alpha=0.7)
+text(1e-2, T_minimal*0.8, "$T_{\\rm min}$", va="top", alpha=0.7, fontsize=8)
+plot(a_evol, T_cmb, 'k--', lw=0.5, alpha=0.7)
+text(a_evol[20], T_cmb[20]*0.55, "$(1+z)^2\\times T_{\\rm CMB,0}$", rotation=-34, alpha=0.7, fontsize=8, va="top", bbox=dict(facecolor='w', edgecolor='none', pad=1.0, alpha=0.9))
+
+
+redshift_ticks = np.array([0., 1., 2., 5., 10., 20., 50., 100.])
+redshift_labels = ["$0$", "$1$", "$2$", "$5$", "$10$", "$20$", "$50$", "$100$"]
+a_ticks = 1. / (redshift_ticks + 1.)
+
+xticks(a_ticks, redshift_labels)
+minorticks_off()
+
+xlabel("${\\rm Redshift}~z$", labelpad=0)
+ylabel("${\\rm Temperature}~T~[{\\rm K}]$", labelpad=0)
+xlim(9e-3, 1.1)
+ylim(20, 2.5e7)
+
+savefig("Temperature_evolution.png", dpi=200)
+
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/run.sh b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b2585d70b7cd2b717af02f005d690d0e8a9f932e
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/run.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# Generate the initial conditions if they are not present.
+if [ ! -e small_cosmo_volume.hdf5 ]
+then
+    echo "Fetching initial conditions for the small cosmological volume example..."
+    ./getIC.sh
+fi
+
+# Run SWIFT
+../../swift --cosmology --hydro --self-gravity --threads=8 small_cosmo_volume.yml 2>&1 | tee output.log
+
+# Plot the temperature evolution
+python plotTempEvolution.py
diff --git a/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/small_cosmo_volume.yml b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/small_cosmo_volume.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e0d633079e941ade161b7e2fde0fbc063cbac254
--- /dev/null
+++ b/examples/SmallCosmoVolume/SmallCosmoVolume_hydro/small_cosmo_volume.yml
@@ -0,0 +1,72 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun
+  UnitLength_in_cgs:   3.08567758e24 # 1 Mpc
+  UnitVelocity_in_cgs: 1e5           # 1 km/s
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+Cosmology:                      # WMAP9 cosmology
+  Omega_m:        0.276
+  Omega_lambda:   0.724
+  Omega_b:        0.0455
+  h:              0.703
+  a_begin:        0.019607843	# z_ini = 50.
+  a_end:          1.0		# z_end = 0.
+
+# Parameters governing the time integration
+TimeIntegration:
+  dt_min:     1e-6 
+  dt_max:     1e-2 
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:          0.025         
+  theta:        0.3           
+  comoving_softening:     0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  max_physical_softening: 0.0889     # 1/25th of the mean inter-particle separation: 88.9 kpc
+  mesh_side_length:       64
+
+# Parameters of the hydro scheme
+SPH:
+  resolution_eta:      1.2348   # "48 Ngb" with the cubic spline kernel
+  h_min_ratio:         0.1
+  CFL_condition:       0.1
+  initial_temperature: 7075.    # (1 + z_ini)^2 * 2.72K
+  minimal_temperature: 100.
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            snap
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1.02
+  scale_factor_first:  0.02
+  
+Scheduler:
+  max_top_level_cells: 8
+  cell_split_size:     50
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  small_cosmo_volume.hdf5
+  periodic:                    1
+  cleanup_h_factors:           1    
+  cleanup_velocity_factors:    1  
+  generate_gas_in_ics:         1      # Generate gas particles from the DM-only ICs
+  cleanup_smoothing_lengths:   1      # Since we generate gas, make use of the (expensive) cleaning-up procedure.
+
+# Parameters for the EAGLE "equation of state"
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3: 1e-5       # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:    10.        # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:        8000       # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:           1.         # Slope of the EAGLE Cool limiter entropy floor
+
diff --git a/examples/SmallCosmoVolume/small_cosmo_volume.yml b/examples/SmallCosmoVolume/small_cosmo_volume.yml
deleted file mode 100644
index 32ec15db6be35fed4eb0c0168f52f0ba919158ea..0000000000000000000000000000000000000000
--- a/examples/SmallCosmoVolume/small_cosmo_volume.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Define the system of units to use internally. 
-InternalUnitSystem:
-  UnitMass_in_cgs:     1.98848e43    # 10^10 M_sun in grams
-  UnitLength_in_cgs:   3.08567758e24 # Mpc in centimeters
-  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
-  UnitCurrent_in_cgs:  1             # Amperes
-  UnitTemp_in_cgs:     1             # Kelvin
-
-# Structure finding options
-StructureFinding:
-  config_file_name:     stf_input_6dfof_dmonly_sub.cfg  # Name of the STF config file.
-  basename:             ./stf                           # Common part of the name of output files.
-  output_time_format:   0                               # Specifies the frequency format of structure finding. 0 for simulation steps (delta_step) and 1 for simulation time intervals (delta_time).
-  scale_factor_first:   0.92                            # Scale-factor of the first snaphot (cosmological run)
-  time_first:           0.01                            # Time of the first structure finding output (in internal units).
-  delta_step:           1000                            # Time difference between consecutive structure finding outputs (in internal units) in simulation steps.
-  delta_time:           1.02                            # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
-
-# WMAP9 cosmology
-Cosmology:
-  Omega_m:        0.276
-  Omega_lambda:   0.724
-  Omega_b:        0.0455
-  h:              0.703
-  a_begin:        0.0196078
-  a_end:          1.0
-
-# Parameters governing the time integration
-TimeIntegration:
-  dt_min:     1e-6 
-  dt_max:     1e-2 
-
-# Parameters for the self-gravity scheme
-Gravity:
-  eta:          0.025         
-  theta:        0.3           
-  comoving_softening:     0.08
-  max_physical_softening: 0.08
-  mesh_side_length:         32
-  
-# Parameters governing the snapshots
-Snapshots:
-  basename:            snap
-  delta_time:          1.02
-  scale_factor_first:  0.02
-  
-# Parameters governing the conserved quantities statistics
-Statistics:
-  delta_time:          1.02
-  scale_factor_first:  0.02
-  
-Scheduler:
-  max_top_level_cells: 8
-  cell_split_size:     50
-  
-# Parameters related to the initial conditions
-InitialConditions:
-  file_name:  small_cosmo_volume.hdf5
-  cleanup_h_factors:           1    
-  cleanup_velocity_factors:    1  
diff --git a/examples/SmoothedMetallicity/getGlass.sh b/examples/SubgridTests/SmoothedMetallicity/getGlass.sh
similarity index 100%
rename from examples/SmoothedMetallicity/getGlass.sh
rename to examples/SubgridTests/SmoothedMetallicity/getGlass.sh
diff --git a/examples/SmoothedMetallicity/makeIC.py b/examples/SubgridTests/SmoothedMetallicity/makeIC.py
similarity index 97%
rename from examples/SmoothedMetallicity/makeIC.py
rename to examples/SubgridTests/SmoothedMetallicity/makeIC.py
index 86679d5efe897b9dfae7db94b36d74bb047661e6..542b4c5911c942015d16595f42e73ca8978d20da 100644
--- a/examples/SmoothedMetallicity/makeIC.py
+++ b/examples/SubgridTests/SmoothedMetallicity/makeIC.py
@@ -84,10 +84,6 @@ grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
 grp.attrs["Flag_Entropy_ICs"] = 0
 grp.attrs["Dimension"] = 3
 
-# Runtime parameters
-grp = file.create_group("/RuntimePars")
-grp.attrs["PeriodicBoundariesOn"] = 1
-
 # Units
 grp = file.create_group("/Units")
 grp.attrs["Unit length in cgs (U_L)"] = 1.
diff --git a/examples/SmoothedMetallicity/plotSolution.py b/examples/SubgridTests/SmoothedMetallicity/plotSolution.py
similarity index 100%
rename from examples/SmoothedMetallicity/plotSolution.py
rename to examples/SubgridTests/SmoothedMetallicity/plotSolution.py
diff --git a/examples/SmoothedMetallicity/run.sh b/examples/SubgridTests/SmoothedMetallicity/run.sh
similarity index 82%
rename from examples/SmoothedMetallicity/run.sh
rename to examples/SubgridTests/SmoothedMetallicity/run.sh
index de8c55d678bcb611934af450940d8ed8e6c15d6b..736a16fc14ece7b09e13b61cd8e04f9735e6cfc6 100755
--- a/examples/SmoothedMetallicity/run.sh
+++ b/examples/SubgridTests/SmoothedMetallicity/run.sh
@@ -13,7 +13,7 @@ then
 fi
 
 # Run SWIFT
-../swift -n 1 -s -t 4 smoothed_metallicity.yml 2>&1 | tee output.log
+../../swift --steps=1 --hydro --threads=4 smoothed_metallicity.yml 2>&1 | tee output.log
 
 # Plot the solution
 python plotSolution.py 1
diff --git a/examples/SmoothedMetallicity/smoothed_metallicity.yml b/examples/SubgridTests/SmoothedMetallicity/smoothed_metallicity.yml
similarity index 98%
rename from examples/SmoothedMetallicity/smoothed_metallicity.yml
rename to examples/SubgridTests/SmoothedMetallicity/smoothed_metallicity.yml
index 2e37695392b12c545bbbdbe7fd94748d5b3b9ff8..f6841c6bd0744b4bbeacbe136a126b4ed5631f6f 100644
--- a/examples/SmoothedMetallicity/smoothed_metallicity.yml
+++ b/examples/SubgridTests/SmoothedMetallicity/smoothed_metallicity.yml
@@ -31,4 +31,5 @@ SPH:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  ./smoothed_metallicity.hdf5          # The file to read
+  periodic:   1
 
diff --git a/examples/SubgridTests/SupernovaeFeedback/SN_feedback.yml b/examples/SubgridTests/SupernovaeFeedback/SN_feedback.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a59ae302ff4052a3daf0535e93a0c2cd5e9904f5
--- /dev/null
+++ b/examples/SubgridTests/SupernovaeFeedback/SN_feedback.yml
@@ -0,0 +1,44 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1   # Grams
+  UnitLength_in_cgs:   1   # Centimeters
+  UnitVelocity_in_cgs: 1   # Centimeters per second
+  UnitCurrent_in_cgs:  1   # Amperes
+  UnitTemp_in_cgs:     1   # Kelvin
+
+# Values of some physical constants
+PhysicalConstants:
+  G:            0 # (Optional) Overwrite the value of Newton's constant used internally by the code.
+
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   5e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-7  # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-4  # The maximal time-step size of the simulation (in internal units).
+
+# Parameters governing the snapshots
+Snapshots:
+  basename:            SN_feedback # Common part of the name of output files
+  time_first:          0.    # Time of the first output (in internal units)
+  delta_time:          1e-2  # Time difference between consecutive outputs (in internal units)
+  compression:         1
+ 
+# Parameters governing the conserved quantities statistics
+Statistics:
+  delta_time:          1e-3 # Time between statistics output
+
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
+
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:                    ./SN_feedback.hdf5          
+  smoothing_length_scaling:     1.
+  periodic:                    1    # Are we running with periodic ICs?
+ 
+# Parameters for the stellar models
+Stars:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
diff --git a/examples/SubgridTests/SupernovaeFeedback/getGlass.sh b/examples/SubgridTests/SupernovaeFeedback/getGlass.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d5c5f590ac37c9c9431d626a2ea61b0c12c1513c
--- /dev/null
+++ b/examples/SubgridTests/SupernovaeFeedback/getGlass.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassCube_64.hdf5
diff --git a/examples/SubgridTests/SupernovaeFeedback/makeIC.py b/examples/SubgridTests/SupernovaeFeedback/makeIC.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a0fca2bfd21f2cf1d5052660581d97184705480
--- /dev/null
+++ b/examples/SubgridTests/SupernovaeFeedback/makeIC.py
@@ -0,0 +1,117 @@
+###############################################################################
+ # This file is part of SWIFT.
+ # Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ # 
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU Lesser General Public License as published
+ # by the Free Software Foundation, either version 3 of the License, or
+ # (at your option) any later version.
+ # 
+ # This program is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ # 
+ # You should have received a copy of the GNU Lesser General Public License
+ # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ # 
+ ##############################################################################
+
+import h5py
+from numpy import *
+
+# Generates a swift IC file for the Supernova feedback test in a periodic cubic box
+
+# Parameters
+gamma = 5./3.      # Gas adiabatic index
+rho0 = 1.          # Background density
+P0 = 1.e-6         # Background pressure
+E0= 1.             # Energy of the explosion
+N_inject = 15      # Number of particles in which to inject energy
+fileName = "SN_feedback.hdf5" 
+
+#---------------------------------------------------
+glass = h5py.File("glassCube_64.hdf5", "r")
+
+# Read particle positions and h from the glass
+pos = glass["/PartType0/Coordinates"][:,:]
+eps = 1e-6
+pos = (pos - pos.min()) / (pos.max() - pos.min() + eps)
+h = glass["/PartType0/SmoothingLength"][:] * 0.3 * 3.3
+
+numPart = size(h)
+vol = 1.
+Boxsize = 1.
+
+# Generate extra arrays
+v = zeros((numPart, 3))
+ids = linspace(1, numPart, numPart)
+m = zeros(numPart)
+u = zeros(numPart)
+r = zeros(numPart)
+
+r = sqrt((pos[:,0] - 0.5)**2 + (pos[:,1] - 0.5)**2 + (pos[:,2] - 0.5)**2)
+m[:] = rho0 * vol / numPart    
+u[:] = P0 / (rho0 * (gamma - 1))
+
+#--------------------------------------------------
+
+star_pos = zeros((1, 3))
+star_pos[:,:] = 0.5 * Boxsize
+
+star_v = zeros((1, 3))
+star_v[:,:] = 0.
+
+# increase mass to keep it at center
+star_m = 1e3 * array([rho0 * vol / numPart])
+star_ids = array([numPart + 1])
+star_h = array([h.max()])
+
+#--------------------------------------------------
+
+#File
+file = h5py.File(fileName, 'w')
+
+# Header
+grp = file.create_group("/Header")
+grp.attrs["BoxSize"] = [Boxsize]*3
+grp.attrs["NumPart_Total"] =  [numPart, 0, 0, 0, 1, 0]
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 1, 0]
+grp.attrs["Time"] = 0.0
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["Flag_Entropy_ICs"] = 0
+grp.attrs["Dimension"] = 3
+
+#Runtime parameters
+grp = file.create_group("/RuntimePars")
+grp.attrs["PeriodicBoundariesOn"] = 0
+
+#Units
+grp = file.create_group("/Units")
+grp.attrs["Unit length in cgs (U_L)"] = 1.
+grp.attrs["Unit mass in cgs (U_M)"] = 1.
+grp.attrs["Unit time in cgs (U_t)"] = 1.
+grp.attrs["Unit current in cgs (U_I)"] = 1.
+grp.attrs["Unit temperature in cgs (U_T)"] = 1.
+
+#Particle group
+grp = file.create_group("/PartType0")
+grp.create_dataset('Coordinates', data=pos, dtype='d')
+grp.create_dataset('Velocities', data=v, dtype='f')
+grp.create_dataset('Masses', data=m, dtype='f')
+grp.create_dataset('SmoothingLength', data=h, dtype='f')
+grp.create_dataset('InternalEnergy', data=u, dtype='f')
+grp.create_dataset('ParticleIDs', data=ids, dtype='L')
+
+# stellar group
+grp = file.create_group("/PartType4")
+grp.create_dataset("Coordinates", data=star_pos, dtype="d")
+grp.create_dataset('Velocities', data=star_v, dtype='f')
+grp.create_dataset('Masses', data=star_m, dtype='f')
+grp.create_dataset('SmoothingLength', data=star_h, dtype='f')
+grp.create_dataset('ParticleIDs', data=star_ids, dtype='L')
+
+
+file.close()
diff --git a/examples/SubgridTests/SupernovaeFeedback/run.sh b/examples/SubgridTests/SupernovaeFeedback/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..af8802164c528fe14b3d525827107868a8ac5720
--- /dev/null
+++ b/examples/SubgridTests/SupernovaeFeedback/run.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+ # Generate the initial conditions if they are not present.
+if [ ! -e glassCube_64.hdf5 ]
+then
+    echo "Fetching initial glass file for the Supernovae feedback example..."
+    ./getGlass.sh
+fi
+if [ ! -e SN_feedback.hdf5 ]
+then
+    echo "Generating initial conditions for the Supernovae feedback example..."
+    python makeIC.py
+fi
+
+# Run SWIFT
+../../swift --external-gravity --feedback --hydro --stars --threads=4 SN_feedback.yml 2>&1 | tee output.log
+
+# Plot the solution
+# TODO
diff --git a/examples/analyse_tasks.py b/examples/analyse_tasks.py
deleted file mode 100755
index a72ee0ce637b6ac2da4b8b95dac5bacab3d40a99..0000000000000000000000000000000000000000
--- a/examples/analyse_tasks.py
+++ /dev/null
@@ -1,373 +0,0 @@
-#!/usr/bin/env python
-"""
-Usage:
-    analyse_tasks.py [options] input.dat
-
-where input.dat is a thread info file for a step (MPI or non-MPI). Use the
-'-y interval' flag of the swift and swift_mpi commands to create these
-(you will also need to configure with the --enable-task-debugging option).
-
-The output is an analysis of the task timings, including deadtime per thread
-and step, total amount of time spent for each task type, for the whole step
-and per thread and the minimum and maximum times spent per task type.
-
-This file is part of SWIFT.
-Copyright (c) 2017 Peter W. Draper (p.w.draper@durham.ac.uk)
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as published
-by the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import matplotlib
-matplotlib.use("Agg")
-import matplotlib.collections as collections
-import matplotlib.ticker as plticker
-import pylab as pl
-import sys
-import argparse
-
-#  Handle the command line.
-parser = argparse.ArgumentParser(description="Analyse task dumps")
-
-parser.add_argument("input", help="Thread data file (-y output)")
-parser.add_argument("-v", "--verbose", dest="verbose",
-                    help="Verbose output (default: False)",
-                    default=False, action="store_true")
-parser.add_argument("-r", "--rank", dest="rank",
-                    help="Rank to process (default: all)",
-                    default="all", action="store")
-
-args = parser.parse_args()
-infile = args.input
-
-#  Tasks and subtypes. Indexed as in tasks.h.
-TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
-             "init_grav", "init_grav_out", "ghost_in", "ghost", "ghost_out", "extra_ghost", "drift_part", "drift_gpart",
-             "end_force", "kick1", "kick2", "timestep", "send", "recv", "grav_long_range", "grav_mm", "grav_down_in", 
-             "grav_down", "grav_mesh", "cooling", "sourceterms", "count"]
-
-SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
-            "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]
-
-SIDS = ["(-1,-1,-1)", "(-1,-1, 0)", "(-1,-1, 1)", "(-1, 0,-1)",
-        "(-1, 0, 0)", "(-1, 0, 1)", "(-1, 1,-1)", "(-1, 1, 0)",
-        "(-1, 1, 1)", "( 0,-1,-1)", "( 0,-1, 0)", "( 0,-1, 1)",
-        "( 0, 0,-1)"]
-
-#  Read input.
-data = pl.loadtxt( infile )
-full_step = data[0,:]
-
-#  Do we have an MPI file?
-full_step = data[0,:]
-if full_step.size == 13:
-    print "# MPI mode"
-    mpimode = True
-    nranks = int(max(data[:,0])) + 1
-    print "# Number of ranks:", nranks
-    rankcol = 0
-    threadscol = 1
-    taskcol = 2
-    subtaskcol = 3
-    ticcol = 5
-    toccol = 6
-    updates = int(full_step[7])
-    g_updates = int(full_step[8])
-    s_updates = int(full_step[9])
-else:
-    print "# non MPI mode"
-    nranks = 1
-    mpimode = False
-    rankcol = -1
-    threadscol = 0
-    taskcol = 1
-    subtaskcol = 2
-    ticcol = 4
-    toccol = 5
-    updates = int(full_step[6])
-    g_updates = int(full_step[7])
-    s_updates = int(full_step[8])
-
-#  Get the CPU clock to convert ticks into milliseconds.
-CPU_CLOCK = float(full_step[-1]) / 1000.0
-if args.verbose:
-    print "# CPU frequency:", CPU_CLOCK * 1000.0
-print "#   updates:", updates
-print "# g_updates:", g_updates
-print "# s_updates:", s_updates
-
-if mpimode:
-    if args.rank == "all":
-        ranks = range(nranks)
-    else:
-        ranks = [int(args.rank)]
-        if ranks[0] >= nranks:
-            print "Error: maximum rank is " + str(nranks - 1)
-            sys.exit(1)
-else:
-    ranks = [1]
-
-maxthread = int(max(data[:,threadscol])) + 1
-print "# Maximum thread id:", maxthread
-
-#  Avoid start and end times of zero.
-sdata = data[data[:,ticcol] != 0]
-sdata = data[data[:,toccol] != 0]
-
-#  Now we process the required ranks.
-for rank in ranks:
-    if mpimode:
-        print "# Rank", rank
-        data = sdata[sdata[:,rankcol] == rank]
-        full_step = data[0,:]
-    else:
-        data = sdata
-
-    #  Recover the start and end time
-    tic_step = int(full_step[ticcol])
-    toc_step = int(full_step[toccol])
-    data = data[1:,:]
-
-    #  Avoid start and end times of zero.
-    data = data[data[:,ticcol] != 0]
-    data = data[data[:,toccol] != 0]
-
-    #  Calculate the time range.
-    total_t = (toc_step - tic_step)/ CPU_CLOCK
-    print "# Data range: ", total_t, "ms"
-    print
-
-    #  Correct times to relative values.
-    start_t = float(tic_step)
-    data[:,ticcol] -= start_t
-    data[:,toccol] -= start_t
-    end_t = (toc_step - start_t) / CPU_CLOCK
-
-    tasks = {}
-    tasks[-1] = []
-    for i in range(maxthread):
-        tasks[i] = []
-
-    #  Gather into by thread data.
-    num_lines = pl.shape(data)[0]
-    for line in range(num_lines):
-        thread = int(data[line,threadscol])
-        tic = int(data[line,ticcol]) / CPU_CLOCK
-        toc = int(data[line,toccol]) / CPU_CLOCK
-        tasktype = int(data[line,taskcol])
-        subtype = int(data[line,subtaskcol])
-        sid = int(data[line, -1])
-
-        tasks[thread].append([tic,toc,tasktype,subtype, sid])
-
-    #  Sort by tic and gather used threads.
-    threadids = []
-    for i in range(maxthread):
-        tasks[i] = sorted(tasks[i], key=lambda task: task[0])
-        threadids.append(i)
-
-    #  Times per task.
-    print "# Task times:"
-    print "# -----------"
-    print "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
-          .format("type/subtype", "count","minimum", "maximum",
-                  "sum", "mean", "percent")
-
-    alltasktimes = {}
-    sidtimes = {}
-    for i in threadids:
-        tasktimes = {}
-        for task in tasks[i]:
-            key = TASKTYPES[task[2]] + "/" + SUBTYPES[task[3]]
-            dt = task[1] - task[0]
-            if not key in tasktimes:
-                tasktimes[key] = []
-            tasktimes[key].append(dt)
-
-            if not key in alltasktimes:
-                alltasktimes[key] = []
-            alltasktimes[key].append(dt)
-
-            my_sid = task[4]
-            if my_sid > -1:
-                if not my_sid in sidtimes:
-                    sidtimes[my_sid] = []
-                sidtimes[my_sid].append(dt)
-
-        print "# Thread : ", i
-        for key in sorted(tasktimes.keys()):
-            taskmin = min(tasktimes[key])
-            taskmax = max(tasktimes[key])
-            tasksum = sum(tasktimes[key])
-            print "{0:19s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-                  .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
-                          tasksum / len(tasktimes[key]), tasksum / total_t * 100.0)
-        print
-
-    print "# All threads : "
-    for key in sorted(alltasktimes.keys()):
-        taskmin = min(alltasktimes[key])
-        taskmax = max(alltasktimes[key])
-        tasksum = sum(alltasktimes[key])
-        print "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-              .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
-                      tasksum / len(alltasktimes[key]),
-                      tasksum / (len(threadids) * total_t) * 100.0)
-    print
-
-    # For pairs, show stuff sorted by SID
-    print "# By SID (all threads): "
-    print "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
-        .format("Pair/Sub-pair SID", "count","minimum", "maximum",
-                "sum", "mean", "percent")
-
-    for sid in range(0,13):
-        if sid in sidtimes:
-            sidmin = min(sidtimes[sid])
-            sidmax = max(sidtimes[sid])
-            sidsum = sum(sidtimes[sid])
-            sidcount = len(sidtimes[sid])
-            sidmean = sidsum / sidcount
-        else:
-            sidmin = 0.
-            sidmax = 0.
-            sidsum = 0.
-            sidcount = 0
-            sidmean = 0.
-        print "{0:3d} {1:15s}: {2:7d} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.4f} {7:9.2f}"\
-            .format(sid, SIDS[sid], sidcount, sidmin, sidmax, sidsum,
-                    sidmean, sidsum / (len(threadids) * total_t) * 100.0)
-    print
-
-    #  Dead times.
-    print "# Times not in tasks (deadtimes)"
-    print "# ------------------------------"
-    print "# Time before first task:"
-    print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
-    predeadtimes = []
-    for i in threadids:
-        if len(tasks[i]) > 0:
-            predeadtime = tasks[i][0][0]
-            print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-                  .format(i, predeadtime, predeadtime / total_t * 100.0)
-            predeadtimes.append(predeadtime)
-        else:
-            predeadtimes.append(0.0)
-
-    predeadmin = min(predeadtimes)
-    predeadmax = max(predeadtimes)
-    predeadsum = sum(predeadtimes)
-    print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-          .format(len(predeadtimes), predeadmin, predeadmax, predeadsum,
-                  predeadsum / len(predeadtimes),
-                  predeadsum / (len(threadids) * total_t ) * 100.0)
-    print
-
-    print "# Time after last task:"
-    print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
-    postdeadtimes = []
-    for i in threadids:
-        if len(tasks[i]) > 0:
-            postdeadtime = total_t - tasks[i][-1][1]
-            print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-                  .format(i, postdeadtime, postdeadtime / total_t * 100.0)
-            postdeadtimes.append(postdeadtime)
-        else:
-            postdeadtimes.append(0.0)
-
-    postdeadmin = min(postdeadtimes)
-    postdeadmax = max(postdeadtimes)
-    postdeadsum = sum(postdeadtimes)
-    print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-          .format(len(postdeadtimes), postdeadmin, postdeadmax, postdeadsum,
-                  postdeadsum / len(postdeadtimes),
-                  postdeadsum / (len(threadids) * total_t ) * 100.0)
-    print
-
-    #  Time in engine, i.e. from first to last tasks.
-    print "# Time between tasks (engine deadtime):"
-    print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    enginedeadtimes = []
-    for i in threadids:
-        deadtimes = []
-        if len(tasks[i]) > 0:
-            last = tasks[i][0][0]
-        else:
-            last = 0.0
-        for task in tasks[i]:
-            dt = task[0] - last
-            deadtimes.append(dt)
-            last = task[1]
-
-        #  Drop first value, last value already gone.
-        if len(deadtimes) > 1:
-            deadtimes = deadtimes[1:]
-        else:
-            #  Only one or fewer tasks, so no deadtime by definition.
-            deadtimes = [0.0]
-
-        deadmin = min(deadtimes)
-        deadmax = max(deadtimes)
-        deadsum = sum(deadtimes)
-        print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-              .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                      deadsum / len(deadtimes), deadsum / total_t * 100.0)
-        enginedeadtimes.extend(deadtimes)
-
-    deadmin = min(enginedeadtimes)
-    deadmax = max(enginedeadtimes)
-    deadsum = sum(enginedeadtimes)
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-          .format(len(enginedeadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(enginedeadtimes),
-                  deadsum / (len(threadids) * total_t ) * 100.0)
-    print
-
-    #  All times in step.
-    print "# All deadtimes:"
-    print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-          .format("count", "minimum", "maximum", "sum", "mean", "percent")
-    alldeadtimes = []
-    for i in threadids:
-        deadtimes = []
-        last = 0
-        for task in tasks[i]:
-            dt = task[0] - last
-            deadtimes.append(dt)
-            last = task[1]
-        dt = total_t - last
-        deadtimes.append(dt)
-
-        deadmin = min(deadtimes)
-        deadmax = max(deadtimes)
-        deadsum = sum(deadtimes)
-        print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-              .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
-        alldeadtimes.extend(deadtimes)
-
-    deadmin = min(alldeadtimes)
-    deadmax = max(alldeadtimes)
-    deadsum = sum(alldeadtimes)
-    print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-          .format(len(alldeadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(alldeadtimes),
-                  deadsum / (len(threadids) * total_t ) * 100.0)
-    print
-
-sys.exit(0)
diff --git a/examples/check_ngbs.py b/examples/check_ngbs.py
deleted file mode 100644
index a4a07ce7bd6ffb817e8106b74d9895a0edbceca7..0000000000000000000000000000000000000000
--- a/examples/check_ngbs.py
+++ /dev/null
@@ -1,321 +0,0 @@
-import h5py as h
-import numpy as np
-import matplotlib
-matplotlib.use("Agg")
-from pylab import *
-import os.path
-
-kernel_gamma = 1.825742
-kernel_gamma2 = kernel_gamma * kernel_gamma
-kernel_gamma_dim = np.power(kernel_gamma,3)
-hydro_dimension_unit_sphere = 4. * np.pi / 3.
-kernel_norm = hydro_dimension_unit_sphere * kernel_gamma_dim
-error = False
-
-inputFile1 = ""
-inputFile2 = ""
-
-# Compare the values of two floats
-def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
-    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
-
-# Check list of density neighbours and check that they are correct.
-def check_density_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
-        h_naive, h_sort, num_invalid, acc):
-
-    for k in range(0,num_invalid):
-
-        # Filter neighbour lists for valid particle ids
-        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
-        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
-
-        # Check neighbour lists for differences
-        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
-       
-        # Check for duplicate IDs
-        duplicate_check_naive = len(filter_neigh_naive) != len(set(filter_neigh_naive))
-        duplicate_check_sort = len(filter_neigh_sort) != len(set(filter_neigh_sort))
-
-        if duplicate_check_naive:
-            print "Duplicate neighbour ID found in: ", inputFile1
-            print filter_neigh_naive
-            return True
-        
-        if duplicate_check_sort:
-            print "Duplicate neighbour ID found in: ", inputFile2
-            print filter_neigh_sort
-            return True
-
-        pid = pids[mask][k]
-
-        # Loop over discrepancies and check if they are actually neighbours
-        for pjd in id_list:
-            pi_pos = pos[np.where(pids == pid)]
-            pj_pos = pos[np.where(pids == pjd)]
-            
-            hi = h_naive[np.where(pids == pid)]
-            
-            dx = pi_pos[0][0] - pj_pos[0][0]
-            dy = pi_pos[0][1] - pj_pos[0][1]
-            dz = pi_pos[0][2] - pj_pos[0][2]
-           
-            # Correct for BCs
-            dx = nearest(dx)
-            dy = nearest(dy)
-            dz = nearest(dz)
-
-            r2 = dx*dx + dy*dy + dz*dz
-            
-            hig2 = hi*hi*kernel_gamma2
-            
-            diff = abs(r2 - hig2)
-            
-            print "Particle {} is missing {}, hig2: {}, r2: {}, |r2 - hig2|: {}".format(pid,pjd,hig2, r2, diff)
-            
-            if diff < acc * hig2:
-                print "Missing interaction due to precision issue will be ignored."
-            else:
-                hi_2 = h_sort[np.where(pids == pid)]
-
-                # If a neigbour is missing and the particle has the same h throw
-                # an error.
-                if(isclose(hi,hi_2)):
-                    print "Missing interaction found but particle has the same smoothing length (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-                    return True
-                else:
-                    print "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-
-    return False
-
-# Check list of force neighbours and check that they are correct.
-def check_force_neighbours(pids, ngb_ids_naive, ngb_ids_sort, mask, pos,
-        h_naive, h_sort, num_invalid, acc):
-
-    error_val = False
-
-    for k in range(0,num_invalid):
-
-        # Filter neighbour lists for valid particle ids
-        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
-        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
-
-        # Check neighbour lists for differences
-        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
-        
-        pid = pids[mask][k]
-
-        # Loop over discrepancies and check if they are actually neighbours
-        for pjd in id_list:
-            pi_pos = pos[np.where(pids == pid)]
-            pj_pos = pos[np.where(pids == pjd)]
-            
-            hi = h_naive[np.where(pids == pid)]
-            hj = h_naive[np.where(pids == pjd)]
-            
-            dx = pi_pos[0][0] - pj_pos[0][0]
-            dy = pi_pos[0][1] - pj_pos[0][1]
-            dz = pi_pos[0][2] - pj_pos[0][2]
- 
-            # Correct for BCs
-            dx = nearest(dx)
-            dy = nearest(dy)
-            dz = nearest(dz)
-           
-            r2 = dx*dx + dy*dy + dz*dz
-            
-            hig2 = hi*hi*kernel_gamma2
-            hjg2 = hj*hj*kernel_gamma2
-            
-            diff = abs(r2 - max(hig2, hjg2))
-            
-            print "Particle {} is missing {}, hig2: {}, hjg2: {}, r2: {}, |r2 - max(hig2,hjg2)|: {}".format(pid,pjd,hig2, hjg2, r2, diff)
-
-            if diff < acc * max(hig2,hjg2):
-                print "Missing interaction due to precision issue will be ignored."
-            else:
-                hi_2 = h_sort[np.where(pids == pid)]
-                if(isclose(hi,hi_2)):
-                    print "Missing interaction due to the same smoothing lengths will not be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-                    error_val = True
-                else:
-                    print "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."%(hi, hi_2)
-
-    return error_val
-
-def nearest(dx):
-    if(dx > 0.5 * box_size):
-        return dx - box_size
-    elif(dx < -0.5 * box_size):
-        return dx + box_size
-    else: 
-        return dx
-
-# Parse command line arguments
-if len(sys.argv) < 3:
-    print "Error: pass input files as arguments"
-    sys.exit()
-else:
-    inputFile1 = sys.argv[1]
-    inputFile2 = sys.argv[2]
-    if os.path.exists(inputFile1) != 1:
-        print "\n{} does not exist!\n".format(inputFile1)
-        sys.exit()
-    if os.path.exists(inputFile2) != 1:
-        print "\n{} does not exist!\n".format(inputFile2)
-        sys.exit()
-
-# Open input files    
-file_naive = h.File(inputFile1, "r")
-file_sort = h.File(inputFile2, "r")
-
-box_size = file_naive["/Header"].attrs["BoxSize"][0]
-
-# Read input file fields
-ids_naive = file_naive["/PartType0/ParticleIDs"][:]
-ids_sort = file_sort["/PartType0/ParticleIDs"][:]
-
-h_naive = file_naive["/PartType0/SmoothingLength"][:]
-h_sort = file_sort["/PartType0/SmoothingLength"][:]
-
-pos_naive = file_naive["/PartType0/Coordinates"][:,:]
-#pos_sort = file_sort["/PartType0/Coordinates"][:,:]
-
-num_density_naive = file_naive["/PartType0/Num_ngb_density"][:]
-num_density_sort = file_sort["/PartType0/Num_ngb_density"][:]
-
-num_force_naive = file_naive["/PartType0/Num_ngb_force"][:]
-num_force_sort = file_sort["/PartType0/Num_ngb_force"][:]
-
-neighbour_ids_density_naive = file_naive["/PartType0/Ids_ngb_density"][:]
-neighbour_ids_density_sort = file_sort["/PartType0/Ids_ngb_density"][:]
-
-neighbour_ids_force_naive = file_naive["/PartType0/Ids_ngb_force"][:]
-neighbour_ids_force_sort = file_sort["/PartType0/Ids_ngb_force"][:]
-
-
-#wcount_naive = file_naive["/PartType0/Wcount"][:]
-#wcount_sort = file_sort["/PartType0/Wcount"][:]
-#
-#wcount_naive = wcount_naive * np.power(h_naive,3) * kernel_norm
-#wcount_sort = wcount_sort * np.power(h_sort,3) * kernel_norm
-
-# Cross check
-max_density_ngbs_naive = np.max(num_density_naive)
-max_density_ngbs_sort = np.max(num_density_sort)
-max_force_ngbs_naive = np.max(num_force_naive)
-max_force_ngbs_sort = np.max(num_force_sort)
-
-print "                   Min     Mean     Max "
-print "                   ---------------------"
-print "Ngbs density naiv: ", np.min(num_density_naive), np.mean(num_density_naive), max_density_ngbs_naive
-print "Ngbs density sort: ", np.min(num_density_sort), np.mean(num_density_sort), max_density_ngbs_sort
-print "Ngbs force naiv:   ", np.min(num_force_naive), np.mean(num_force_naive), max_force_ngbs_naive
-print "Ngbs force sort:   ", np.min(num_force_sort), np.mean(num_force_sort), max_force_ngbs_sort
-#print "Wcount naiv:   ", np.min(wcount_naive), np.mean(wcount_naive), np.max(wcount_naive)
-#print "Wcount sort:   ", np.min(wcount_sort), np.mean(wcount_sort), np.max(wcount_sort)
-
-# Sort
-index_naive = np.argsort(ids_naive)
-index_sort = np.argsort(ids_sort)
-
-num_density_naive = num_density_naive[index_naive]
-num_density_sort = num_density_sort[index_sort]
-num_force_naive = num_force_naive[index_naive]
-num_force_sort = num_force_sort[index_sort]
-ids_naive = ids_naive[index_naive]
-ids_sort = ids_sort[index_sort]
-neighbour_ids_density_naive = neighbour_ids_density_naive[index_naive]
-neighbour_ids_density_sort = neighbour_ids_density_sort[index_sort]
-neighbour_ids_force_naive = neighbour_ids_force_naive[index_naive]
-neighbour_ids_force_sort = neighbour_ids_force_sort[index_sort]
-#wcount_naive = wcount_naive[index_naive]
-#wcount_sort = wcount_sort[index_sort]
-h_naive = h_naive[index_naive]
-h_sort = h_sort[index_sort]
-pos_naive = pos_naive[index_naive]
-#pos_sort = pos_sort[index_sort]
-
-neighbour_length_naive = len(neighbour_ids_density_naive[0])
-neighbour_length_sort = len(neighbour_ids_density_sort[0])
-
-# Check that input files are logging the same number of neighbours
-if neighbour_length_naive != neighbour_length_sort:
-    print "Input files have logged different numbers of neighbour lengths!"
-    print "{} has logged: {} neighbours".format(inputFile1, neighbour_length_naive)
-    print "{} has logged: {} neighbours".format(inputFile2, neighbour_length_sort)
-    exit(1)
-
-if (max_density_ngbs_naive > neighbour_length_naive or max_force_ngbs_naive > neighbour_length_naive or
-    max_density_ngbs_sort > neighbour_length_sort or max_force_ngbs_sort > neighbour_length_sort):
-    print "The number of neighbours has exceeded the number of neighbours logged."
-    print "Modify NUM_OF_NEIGHBOURS in hydro_part.h to log more neighbours."
-    print "The highest neighbour count is: ", max(max_density_ngbs_naive,max_force_ngbs_naive, max_density_ngbs_sort,max_force_ngbs_sort)
-    exit(1)
-
-# First check
-print "\n                         Min    Max"
-print "                         ----------"
-print "Differences for density:  ", min(num_density_naive - num_density_sort), max(num_density_naive - num_density_sort)
-print "Differences for force:    ", min(num_force_naive - num_force_sort), max(num_force_naive - num_force_sort)
-
-# Get the IDs that are different
-mask_density = num_density_naive != num_density_sort
-mask_force = num_force_naive != num_force_sort
-num_invalid_density = np.sum(mask_density)
-num_invalid_force = np.sum(mask_force)
-
-print "\nNum non-zero density: ", num_invalid_density
-print "Num non-zero force:   ", num_invalid_force
-
-print "\nParticle IDs with incorrect densities"
-print "----------------------------------------"
-print ids_naive[mask_density]
-
-# Check density neighbour lists
-error += check_density_neighbours(ids_naive, neighbour_ids_density_naive,
-        neighbour_ids_density_sort, mask_density, pos_naive, h_naive, h_sort,
-        num_invalid_density, 2e-6)
-
-print "Num of density interactions", inputFile1
-print num_density_naive[mask_density]
-
-print "Num of density interactions", inputFile2
-print num_density_sort[mask_density]
-
-print "\nParticle IDs with incorrect forces"
-print "------------------------------------"
-print ids_naive[mask_force]
-
-# Check force neighbour lists
-error += check_force_neighbours(ids_naive, neighbour_ids_force_naive,
-        neighbour_ids_force_sort, mask_force, pos_naive, h_naive, h_sort,
-        num_invalid_force, 2e-6)
-
-print "Num of force interactions", inputFile1
-print num_force_naive[mask_force]
-
-#print "Smoothing lengths", inputFile1
-#print h_naive[mask_force]
-
-print "Num of force interactions", inputFile2
-print num_force_sort[mask_force]
-
-#print "Smoothing lengths", inputFile2
-#print h_sort[mask_force]
-
-# Statistics of h difference
-h_relative = (h_naive - h_sort) / h_naive
-print "h statistics: {} {} (Min, 1st Percentile)".format(np.min(h_relative), np.percentile(h_relative,1))
-print "h statistics: {} {} (Mean, Median)".format(np.mean(h_relative), np.median(h_relative))
-print "h statistics: {} {} (Max, 99th Percentile)".format(np.max(h_relative), np.percentile(h_relative, 99))
-
-if error:
-    print "\n------------------"
-    print "Differences found."
-    print "------------------"
-    exit(1)
-else:
-    print "\n---------------------"
-    print "No differences found."
-    print "---------------------"
-    exit(0)
diff --git a/examples/main.c b/examples/main.c
index 221b20f752be90832e012e1b1df6db4c206d20e8..79d002594eb6255163c9eeff88a89dd6cab510c7 100644
--- a/examples/main.c
+++ b/examples/main.c
@@ -41,6 +41,7 @@
 #endif
 
 /* Local headers. */
+#include "argparse.h"
 #include "swift.h"
 
 /* Engine policy flags. */
@@ -51,69 +52,27 @@
 /* Global profiler. */
 struct profiler prof;
 
-/**
- * @brief Help messages for the command line parameters.
- */
-void print_help_message(void) {
-
-  printf("\nUsage: swift [OPTION]... PARAMFILE\n");
-  printf("       swift_mpi [OPTION]... PARAMFILE\n\n");
-
-  printf("Valid options are:\n");
-  printf("  %2s %14s %s\n", "-a", "", "Pin runners using processor affinity.");
-  printf("  %2s %14s %s\n", "-c", "",
-         "Run with cosmological time integration.");
-  printf("  %2s %14s %s\n", "-C", "", "Run with cooling.");
-  printf(
-      "  %2s %14s %s\n", "-d", "",
-      "Dry run. Read the parameter file, allocate memory but does not read ");
-  printf(
-      "  %2s %14s %s\n", "", "",
-      "the particles from ICs and exit before the start of time integration.");
-  printf("  %2s %14s %s\n", "", "",
-         "Allows user to check validy of parameter and IC files as well as "
-         "memory limits.");
-  printf("  %2s %14s %s\n", "-D", "",
-         "Always drift all particles even the ones far from active particles. "
-         "This emulates");
-  printf("  %2s %14s %s\n", "", "",
-         "Gadget-[23] and GIZMO's default behaviours.");
-  printf("  %2s %14s %s\n", "-e", "",
-         "Enable floating-point exceptions (debugging mode).");
-  printf("  %2s %14s %s\n", "-f", "{int}",
-         "Overwrite the CPU frequency (Hz) to be used for time measurements.");
-  printf("  %2s %14s %s\n", "-g", "",
-         "Run with an external gravitational potential.");
-  printf("  %2s %14s %s\n", "-G", "", "Run with self-gravity.");
-  printf("  %2s %14s %s\n", "-M", "",
-         "Reconstruct the multipoles every time-step.");
-  printf("  %2s %14s %s\n", "-n", "{int}",
-         "Execute a fixed number of time steps. When unset use the time_end "
-         "parameter to stop.");
-  printf("  %2s %14s %s\n", "-o", "{str}",
-         "Generate a default output parameter file.");
-  printf("  %2s %14s %s\n", "-P", "{sec:par:val}",
-         "Set parameter value and overwrites values read from the parameters "
-         "file. Can be used more than once.");
-  printf("  %2s %14s %s\n", "-r", "", "Continue using restart files.");
-  printf("  %2s %14s %s\n", "-s", "", "Run with hydrodynamics.");
-  printf("  %2s %14s %s\n", "-S", "", "Run with stars.");
-  printf("  %2s %14s %s\n", "-t", "{int}",
-         "The number of threads to use on each MPI rank. Defaults to 1 if not "
-         "specified.");
-  printf("  %2s %14s %s\n", "-T", "", "Print timers every time-step.");
-  printf("  %2s %14s %s\n", "-v", "[12]", "Increase the level of verbosity:");
-  printf("  %2s %14s %s\n", "", "", "1: MPI-rank 0 writes,");
-  printf("  %2s %14s %s\n", "", "", "2: All MPI-ranks write.");
-  printf("  %2s %14s %s\n", "-x", "", "Run with structure finding.");
-  printf("  %2s %14s %s\n", "-y", "{int}",
-         "Time-step frequency at which task graphs are dumped.");
-  printf("  %2s %14s %s\n", "-Y", "{int}",
-         "Time-step frequency at which threadpool tasks are dumped.");
-  printf("  %2s %14s %s\n", "-h", "", "Print this help message and exit.");
-  printf(
-      "\nSee the file parameter_example.yml for an example of "
-      "parameter file.\n");
+/*  Usage string. */
+static const char *const swift_usage[] = {
+    "swift [options] [[--] param-file]",
+    "swift [options] param-file",
+    "swift_mpi [options] [[--] param-file]",
+    "swift_mpi [options] param-file",
+    NULL,
+};
+
+/* Function to handle multiple -P arguments. */
+struct cmdparams {
+  const char *param[PARSER_MAX_NO_OF_PARAMS];
+  int nparam;
+};
+
+static int handle_cmdparam(struct argparse *self,
+                           const struct argparse_option *opt) {
+  struct cmdparams *cmdps = (struct cmdparams *)opt->data;
+  cmdps->param[cmdps->nparam] = *(char **)opt->value;
+  cmdps->nparam++;
+  return 1;
 }
 
 /**
@@ -131,13 +90,15 @@ int main(int argc, char *argv[]) {
   struct cooling_function_data cooling_func;
   struct cosmology cosmo;
   struct external_potential potential;
+  struct star_formation starform;
   struct pm_mesh mesh;
   struct gpart *gparts = NULL;
   struct gravity_props gravity_properties;
   struct hydro_props hydro_properties;
+  struct stars_props stars_properties;
+  struct entropy_floor_properties entropy_floor;
   struct part *parts = NULL;
   struct phys_const prog_const;
-  struct sourceterms sourceterms;
   struct space s;
   struct spart *sparts = NULL;
   struct unit_system us;
@@ -188,11 +149,14 @@ int main(int argc, char *argv[]) {
   int restart = 0;
   int with_cosmology = 0;
   int with_external_gravity = 0;
-  int with_sourceterms = 0;
+  int with_temperature = 0;
   int with_cooling = 0;
   int with_self_gravity = 0;
   int with_hydro = 0;
   int with_stars = 0;
+  int with_star_formation = 0;
+  int with_feedback = 0;
+  int with_limiter = 0;
   int with_fp_exceptions = 0;
   int with_drift_all = 0;
   int with_mpole_reconstruction = 0;
@@ -200,188 +164,203 @@ int main(int argc, char *argv[]) {
   int verbose = 0;
   int nr_threads = 1;
   int with_verbose_timers = 0;
-  int nparams = 0;
-  char output_parameters_filename[200] = "";
-  char *cmdparams[PARSER_MAX_NO_OF_PARAMS];
-  char paramFileName[200] = "";
+  char *output_parameters_filename = NULL;
+  char *cpufreqarg = NULL;
+  char *param_filename = NULL;
   char restart_file[200] = "";
   unsigned long long cpufreq = 0;
+  struct cmdparams cmdps;
+  cmdps.nparam = 0;
+  cmdps.param[0] = NULL;
+  char *buffer = NULL;
+
+  /* Parse the command-line parameters. */
+  struct argparse_option options[] = {
+      OPT_HELP(),
+
+      OPT_GROUP("  Simulation options:\n"),
+      OPT_BOOLEAN('b', "feedback", &with_feedback, "Run with stars feedback.",
+                  NULL, 0, 0),
+      OPT_BOOLEAN('c', "cosmology", &with_cosmology,
+                  "Run with cosmological time integration.", NULL, 0, 0),
+      OPT_BOOLEAN(0, "temperature", &with_temperature,
+                  "Run with temperature calculation.", NULL, 0, 0),
+      OPT_BOOLEAN('C', "cooling", &with_cooling,
+                  "Run with cooling (also switches on --with-temperature).",
+                  NULL, 0, 0),
+      OPT_BOOLEAN('D', "drift-all", &with_drift_all,
+                  "Always drift all particles even the ones far from active "
+                  "particles. This emulates Gadget-[23] and GIZMO's default "
+                  "behaviours.",
+                  NULL, 0, 0),
+      OPT_BOOLEAN('F', "star-formation", &with_star_formation,
+                  "Run with star formation.", NULL, 0, 0),
+      OPT_BOOLEAN('g', "external-gravity", &with_external_gravity,
+                  "Run with an external gravitational potential.", NULL, 0, 0),
+      OPT_BOOLEAN('G', "self-gravity", &with_self_gravity,
+                  "Run with self-gravity.", NULL, 0, 0),
+      OPT_BOOLEAN('M', "multipole-reconstruction", &with_mpole_reconstruction,
+                  "Reconstruct the multipoles every time-step.", NULL, 0, 0),
+      OPT_BOOLEAN('s', "hydro", &with_hydro, "Run with hydrodynamics.", NULL, 0,
+                  0),
+      OPT_BOOLEAN('S', "stars", &with_stars, "Run with stars.", NULL, 0, 0),
+      OPT_BOOLEAN('x', "velociraptor", &with_structure_finding,
+                  "Run with structure finding.", NULL, 0, 0),
+      OPT_BOOLEAN(0, "limiter", &with_limiter, "Run with time-step limiter.",
+                  NULL, 0, 0),
+
+      OPT_GROUP("  Control options:\n"),
+      OPT_BOOLEAN('a', "pin", &with_aff,
+                  "Pin runners using processor affinity.", NULL, 0, 0),
+      OPT_BOOLEAN('d', "dry-run", &dry_run,
+                  "Dry run. Read the parameter file, allocates memory but does "
+                  "not read the particles from ICs. Exits before the start of "
+                  "time integration. Checks the validity of parameters and IC "
+                  "files as well as memory limits.",
+                  NULL, 0, 0),
+      OPT_BOOLEAN('e', "fpe", &with_fp_exceptions,
+                  "Enable floating-point exceptions (debugging mode).", NULL, 0,
+                  0),
+      OPT_STRING('f', "cpu-frequency", &cpufreqarg,
+                 "Overwrite the CPU "
+                 "frequency (Hz) to be used for time measurements.",
+                 NULL, 0, 0),
+      OPT_INTEGER('n', "steps", &nsteps,
+                  "Execute a fixed number of time steps. When unset use the "
+                  "time_end parameter to stop.",
+                  NULL, 0, 0),
+      OPT_STRING('o', "output-params", &output_parameters_filename,
+                 "Generate a default output parameter file.", NULL, 0, 0),
+      OPT_STRING('P', "param", &buffer,
+                 "Set parameter value, overiding the value read from the "
+                 "parameter file. Can be used more than once {sec:par:value}.",
+                 handle_cmdparam, (intptr_t)&cmdps, 0),
+      OPT_BOOLEAN('r', "restart", &restart, "Continue using restart files.",
+                  NULL, 0, 0),
+      OPT_INTEGER('t', "threads", &nr_threads,
+                  "The number of threads to use on each MPI rank. Defaults to "
+                  "1 if not specified.",
+                  NULL, 0, 0),
+      OPT_INTEGER('T', "timers", &with_verbose_timers,
+                  "Print timers every time-step.", NULL, 0, 0),
+      OPT_INTEGER('v', "verbose", &verbose,
+                  "Run in verbose mode, in MPI mode 2 outputs from all ranks.",
+                  NULL, 0, 0),
+      OPT_INTEGER('y', "task-dumps", &dump_tasks,
+                  "Time-step frequency at which task graphs are dumped.", NULL,
+                  0, 0),
+      OPT_INTEGER('Y', "threadpool-dumps", &dump_threadpool,
+                  "Time-step frequency at which threadpool tasks are dumped.",
+                  NULL, 0, 0),
+      OPT_END(),
+  };
+  struct argparse argparse;
+  argparse_init(&argparse, options, swift_usage, 0);
+  argparse_describe(&argparse, "\nParameters:",
+                    "\nSee the file examples/parameter_example.yml for an "
+                    "example of parameter file.");
+  int nargs = argparse_parse(&argparse, argc, (const char **)argv);
+
+  /* Need a parameter file. */
+  if (nargs != 1) {
+    if (myrank == 0) argparse_usage(&argparse);
+    printf("\nError: no parameter file was supplied.\n");
+    return 1;
+  }
+  param_filename = argv[0];
 
-  /* Parse the parameters */
-  int c;
-  while ((c = getopt(argc, argv, "acCdDef:FgGhMn:o:P:rsSt:Tv:xy:Y:")) != -1)
-    switch (c) {
-      case 'a':
-#if defined(HAVE_SETAFFINITY) && defined(HAVE_LIBNUMA)
-        with_aff = 1;
-#else
-        error("Need NUMA support for thread affinity");
+  /* Checks of options. */
+#if !defined(HAVE_SETAFFINITY) || !defined(HAVE_LIBNUMA)
+  if (with_aff) {
+    printf("Error: no NUMA support for thread affinity\n");
+    return 1;
+  }
 #endif
-        break;
-      case 'c':
-        with_cosmology = 1;
-        break;
-      case 'C':
-        with_cooling = 1;
-        break;
-      case 'd':
-        dry_run = 1;
-        break;
-      case 'D':
-        with_drift_all = 1;
-        break;
-      case 'e':
-#ifdef HAVE_FE_ENABLE_EXCEPT
-        with_fp_exceptions = 1;
-#else
-        error("Need support for floating point exception on this platform");
+
+#ifndef HAVE_FE_ENABLE_EXCEPT
+  if (with_fp_exceptions) {
+    printf("Error: no support for floating point exceptions\n");
+    return 1;
+  }
 #endif
-        break;
-      case 'f':
-        if (sscanf(optarg, "%llu", &cpufreq) != 1) {
-          if (myrank == 0) printf("Error parsing CPU frequency (-f).\n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
-        break;
-      case 'F':
-        with_sourceterms = 1;
-        break;
-      case 'g':
-        with_external_gravity = 1;
-        break;
-      case 'G':
-        with_self_gravity = 1;
-        break;
-      case 'h':
-        if (myrank == 0) print_help_message();
-        return 0;
-      case 'M':
-        with_mpole_reconstruction = 1;
-        break;
-      case 'n':
-        if (sscanf(optarg, "%d", &nsteps) != 1) {
-          if (myrank == 0) printf("Error parsing fixed number of steps.\n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
-        break;
-      case 'o':
-        if (sscanf(optarg, "%s", output_parameters_filename) != 1) {
-          if (myrank == 0) {
-            printf("Error parsing output fields filename");
-            print_help_message();
-          }
-          return 1;
-        }
-        break;
-      case 'P':
-        cmdparams[nparams] = optarg;
-        nparams++;
-        break;
-      case 'r':
-        restart = 1;
-        break;
-      case 's':
-        with_hydro = 1;
-        break;
-      case 'S':
-        with_stars = 1;
-        break;
-      case 't':
-        if (sscanf(optarg, "%d", &nr_threads) != 1) {
-          if (myrank == 0)
-            printf("Error parsing the number of threads (-t).\n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
-        break;
-      case 'T':
-        with_verbose_timers = 1;
-        break;
-      case 'v':
-        if (sscanf(optarg, "%d", &verbose) != 1) {
-          if (myrank == 0) printf("Error parsing verbosity level (-v).\n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
-        break;
-      case 'x':
-#ifdef HAVE_VELOCIRAPTOR
-        with_structure_finding = 1;
-#else
-        error(
-            "Error: (-x) needs to have the code compiled with VELOCIraptor "
-            "linked in.");
+
+#ifndef HAVE_VELOCIRAPTOR
+  if (with_structure_finding) {
+    printf("Error: VELOCIraptor is not available\n");
+    return 1;
+  }
 #endif
-        break;
-      case 'y':
-        if (sscanf(optarg, "%d", &dump_tasks) != 1) {
-          if (myrank == 0) printf("Error parsing dump_tasks (-y). \n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
+
 #ifndef SWIFT_DEBUG_TASKS
-        if (dump_tasks) {
-          error(
-              "Task dumping is only possible if SWIFT was configured with the "
-              "--enable-task-debugging option.");
-        }
+  if (dump_tasks) {
+    if (myrank == 0) {
+      message(
+          "WARNING: complete task dumps are only created when "
+          "configured with --enable-task-debugging.");
+      message("         Basic task statistics will be output.");
+    }
+  }
 #endif
-        break;
-      case 'Y':
-        if (sscanf(optarg, "%d", &dump_threadpool) != 1) {
-          if (myrank == 0) printf("Error parsing dump_threadpool (-Y). \n");
-          if (myrank == 0) print_help_message();
-          return 1;
-        }
+
 #ifndef SWIFT_DEBUG_THREADPOOL
-        if (dump_threadpool) {
-          error(
-              "Threadpool dumping is only possible if SWIFT was configured "
-              "with the "
-              "--enable-threadpool-debugging option.");
-        }
+  if (dump_threadpool) {
+    printf(
+        "Error: threadpool dumping is only possible if SWIFT was "
+        "configured with the --enable-threadpool-debugging option.\n");
+    return 1;
+  }
 #endif
-        break;
-      case '?':
-        if (myrank == 0) print_help_message();
-        return 1;
-        break;
+
+  /* The CPU frequency is a long long, so we need to parse that ourselves. */
+  if (cpufreqarg != NULL) {
+    if (sscanf(cpufreqarg, "%llu", &cpufreq) != 1) {
+      if (myrank == 0)
+        printf("Error parsing CPU frequency (%s).\n", cpufreqarg);
+      return 1;
     }
+  }
 
   /* Write output parameter file */
-  if (myrank == 0 && strcmp(output_parameters_filename, "") != 0) {
+  if (myrank == 0 && output_parameters_filename != NULL) {
     io_write_output_field_parameter(output_parameters_filename);
     printf("End of run.\n");
     return 0;
   }
 
-  /* check inputs */
-  if (optind == argc - 1) {
-    if (!strcpy(paramFileName, argv[optind++]))
-      error("Error reading parameter file name.");
-  } else if (optind > argc - 1) {
-    if (myrank == 0) printf("Error: A parameter file name must be provided\n");
-    if (myrank == 0) print_help_message();
+  if (!with_self_gravity && !with_hydro && !with_external_gravity) {
+    if (myrank == 0) {
+      argparse_usage(&argparse);
+      printf("\nError: At least one of -s, -g or -G must be chosen.\n");
+    }
     return 1;
-  } else {
-    if (myrank == 0) printf("Error: Too many parameters given\n");
-    if (myrank == 0) print_help_message();
+  }
+  if (with_stars && !with_external_gravity && !with_self_gravity) {
+    if (myrank == 0) {
+      argparse_usage(&argparse);
+      printf(
+          "\nError: Cannot process stars without gravity, -g or -G "
+          "must be chosen.\n");
+    }
     return 1;
   }
-  if (!with_self_gravity && !with_hydro && !with_external_gravity) {
-    if (myrank == 0)
-      printf("Error: At least one of -s, -g or -G must be chosen.\n");
-    if (myrank == 0) print_help_message();
+
+  if (!with_stars && with_feedback) {
+    if (myrank == 0) {
+      argparse_usage(&argparse);
+      printf(
+          "\nError: Cannot process feedback without stars, --stars must be "
+          "chosen.\n");
+    }
     return 1;
   }
-  if (with_stars && !with_external_gravity && !with_self_gravity) {
-    if (myrank == 0)
+
+  if (!with_hydro && with_feedback) {
+    if (myrank == 0) {
+      argparse_usage(&argparse);
       printf(
-          "Error: Cannot process stars without gravity, -g or -G must be "
+          "\nError: Cannot process feedback without gas, --hydro must be "
           "chosen.\n");
-    if (myrank == 0) print_help_message();
+    }
     return 1;
   }
 
@@ -471,15 +450,16 @@ int main(int argc, char *argv[]) {
       (struct swift_params *)malloc(sizeof(struct swift_params));
   if (params == NULL) error("Error allocating memory for the parameter file.");
   if (myrank == 0) {
-    message("Reading runtime parameters from file '%s'", paramFileName);
-    parser_read_file(paramFileName, params);
+    message("Reading runtime parameters from file '%s'", param_filename);
+    parser_read_file(param_filename, params);
 
     /* Handle any command-line overrides. */
-    if (nparams > 0) {
+    if (cmdps.nparam > 0) {
       message(
           "Overwriting values read from the YAML file with command-line "
           "values.");
-      for (int k = 0; k < nparams; k++) parser_set_param(params, cmdparams[k]);
+      for (int k = 0; k < cmdps.nparam; k++)
+        parser_set_param(params, cmdps.param[k]);
     }
   }
 #ifdef WITH_MPI
@@ -487,6 +467,22 @@ int main(int argc, char *argv[]) {
   MPI_Bcast(params, sizeof(struct swift_params), MPI_BYTE, 0, MPI_COMM_WORLD);
 #endif
 
+  /* Temporary early aborts for modes not supported over MPI. */
+#ifdef WITH_MPI
+  if (with_mpole_reconstruction && nr_nodes > 1)
+    error("Cannot reconstruct m-poles every step over MPI (yet).");
+  if (with_star_formation)
+    error("Can't run with star formation over MPI (yet)");
+  if (with_limiter) error("Can't run with time-step limiter over MPI (yet)");
+#endif
+
+    /* Temporary early aborts for modes not supported with hand-vec. */
+#if defined(WITH_VECTORIZATION) && !defined(CHEMISTRY_NONE)
+  error(
+      "Cannot run with chemistry and hand-vectorization (yet). "
+      "Use --disable-hand-vec at configure time.");
+#endif
+
   /* Check that we can write the snapshots by testing if the output
    * directory exists and is searchable and writable. */
   char basename[PARSER_MAX_LINE_SIZE];
@@ -497,8 +493,7 @@ int main(int argc, char *argv[]) {
   }
 
   /* Check that we can write the structure finding catalogues by testing if the
-   * output
-   * directory exists and is searchable and writable. */
+   * output directory exists and is searchable and writable. */
   if (with_structure_finding) {
     char stfbasename[PARSER_MAX_LINE_SIZE];
     parser_get_param_string(params, "StructureFinding:basename", stfbasename);
@@ -517,12 +512,22 @@ int main(int argc, char *argv[]) {
 
   /* Let's report what we did */
   if (myrank == 0) {
-    message("Using initial partition %s",
+#if defined(HAVE_PARMETIS)
+    if (reparttype.usemetis)
+      message("Using METIS serial partitioning:");
+    else
+      message("Using ParMETIS partitioning:");
+#elif defined(HAVE_METIS)
+    message("Using METIS serial partitioning:");
+#else
+    message("Non-METIS partitioning:");
+#endif
+    message("  initial partitioning: %s",
             initial_partition_name[initial_partition.type]);
     if (initial_partition.type == INITPART_GRID)
-      message("grid set to [ %i %i %i ].", initial_partition.grid[0],
+      message("    grid set to [ %i %i %i ].", initial_partition.grid[0],
               initial_partition.grid[1], initial_partition.grid[2]);
-    message("Using %s repartitioning", repartition_name[reparttype.type]);
+    message("  repartitioning: %s", repartition_name[reparttype.type]);
   }
 #endif
 
@@ -556,9 +561,23 @@ int main(int argc, char *argv[]) {
 
   /* How often to check for the stop file and dump restarts and exit the
    * application. */
-  int restart_stop_steps =
+  const int restart_stop_steps =
       parser_get_opt_param_int(params, "Restarts:stop_steps", 100);
 
+  /* Get the maximal wall-clock time of this run */
+  const float restart_max_hours_runtime =
+      parser_get_opt_param_float(params, "Restarts:max_run_time", FLT_MAX);
+
+  /* Do we want to resubmit when we hit the limit? */
+  const int resubmit_after_max_hours =
+      parser_get_opt_param_int(params, "Restarts:resubmit_on_exit", 0);
+
+  /* What command should we run to resubmit at the end? */
+  char resubmit_command[PARSER_MAX_LINE_SIZE];
+  if (resubmit_after_max_hours)
+    parser_get_param_string(params, "Restarts:resubmit_command",
+                            resubmit_command);
+
   /* If restarting, look for the restart files. */
   if (restart) {
 
@@ -644,6 +663,28 @@ int main(int argc, char *argv[]) {
       phys_const_print(&prog_const);
     }
 
+    /* Read particles and space information from ICs */
+    char ICfileName[200] = "";
+    parser_get_param_string(params, "InitialConditions:file_name", ICfileName);
+    const int periodic =
+        parser_get_param_int(params, "InitialConditions:periodic");
+    const int replicate =
+        parser_get_opt_param_int(params, "InitialConditions:replicate", 1);
+    clean_smoothing_length_values = parser_get_opt_param_int(
+        params, "InitialConditions:cleanup_smoothing_lengths", 0);
+    const int cleanup_h = parser_get_opt_param_int(
+        params, "InitialConditions:cleanup_h_factors", 0);
+    const int cleanup_sqrt_a = parser_get_opt_param_int(
+        params, "InitialConditions:cleanup_velocity_factors", 0);
+    const int generate_gas_in_ics = parser_get_opt_param_int(
+        params, "InitialConditions:generate_gas_in_ics", 0);
+
+    /* Some checks that we are not doing something stupid */
+    if (generate_gas_in_ics && flag_entropy_ICs)
+      error("Can't generate gas if the entropy flag is set in the ICs.");
+    if (generate_gas_in_ics && !with_cosmology)
+      error("Can't generate gas if the run is not cosmological.");
+
     /* Initialise the cosmology */
     if (with_cosmology)
       cosmology_init(params, &us, &prog_const, &cosmo);
@@ -663,29 +704,28 @@ int main(int argc, char *argv[]) {
     else
       bzero(&eos, sizeof(struct eos_parameters));
 
+    /* Initialise the entropy floor */
+    if (with_hydro)
+      entropy_floor_init(&entropy_floor, &prog_const, &us, &hydro_properties,
+                         params);
+    else
+      bzero(&entropy_floor, sizeof(struct entropy_floor_properties));
+
+    /* Initialise the stars properties */
+    if (with_stars)
+      stars_props_init(&stars_properties, &prog_const, &us, params,
+                       &hydro_properties);
+    else
+      bzero(&stars_properties, sizeof(struct stars_props));
+
     /* Initialise the gravity properties */
     if (with_self_gravity)
-      gravity_props_init(&gravity_properties, params, &cosmo, with_cosmology);
+      gravity_props_init(&gravity_properties, params, &cosmo, with_cosmology,
+                         periodic);
     else
       bzero(&gravity_properties, sizeof(struct gravity_props));
 
-    /* Read particles and space information from (GADGET) ICs */
-    char ICfileName[200] = "";
-    parser_get_param_string(params, "InitialConditions:file_name", ICfileName);
-    const int replicate =
-        parser_get_opt_param_int(params, "InitialConditions:replicate", 1);
-    clean_smoothing_length_values = parser_get_opt_param_int(
-        params, "InitialConditions:cleanup_smoothing_lengths", 0);
-    const int cleanup_h = parser_get_opt_param_int(
-        params, "InitialConditions:cleanup_h_factors", 0);
-    const int cleanup_sqrt_a = parser_get_opt_param_int(
-        params, "InitialConditions:cleanup_velocity_factors", 0);
-    const int generate_gas_in_ics = parser_get_opt_param_int(
-        params, "InitialConditions:generate_gas_in_ics", 0);
-    if (generate_gas_in_ics && flag_entropy_ICs)
-      error("Can't generate gas if the entropy flag is set in the ICs.");
-    if (generate_gas_in_ics && !with_cosmology)
-      error("Can't generate gas if the run is not cosmological.");
+    /* Be verbose about what happens next */
     if (myrank == 0) message("Reading ICs from file '%s'", ICfileName);
     if (myrank == 0 && cleanup_h)
       message("Cleaning up h-factors (h=%f)", cosmo.h);
@@ -696,20 +736,19 @@ int main(int argc, char *argv[]) {
     /* Get ready to read particles of all kinds */
     size_t Ngas = 0, Ngpart = 0, Nspart = 0;
     double dim[3] = {0., 0., 0.};
-    int periodic = 0;
     if (myrank == 0) clocks_gettime(&tic);
 #if defined(HAVE_HDF5)
 #if defined(WITH_MPI)
 #if defined(HAVE_PARALLEL_HDF5)
     read_ic_parallel(ICfileName, &us, dim, &parts, &gparts, &sparts, &Ngas,
-                     &Ngpart, &Nspart, &periodic, &flag_entropy_ICs, with_hydro,
+                     &Ngpart, &Nspart, &flag_entropy_ICs, with_hydro,
                      (with_external_gravity || with_self_gravity), with_stars,
                      cleanup_h, cleanup_sqrt_a, cosmo.h, cosmo.a, myrank,
                      nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, nr_threads,
                      dry_run);
 #else
     read_ic_serial(ICfileName, &us, dim, &parts, &gparts, &sparts, &Ngas,
-                   &Ngpart, &Nspart, &periodic, &flag_entropy_ICs, with_hydro,
+                   &Ngpart, &Nspart, &flag_entropy_ICs, with_hydro,
                    (with_external_gravity || with_self_gravity), with_stars,
                    cleanup_h, cleanup_sqrt_a, cosmo.h, cosmo.a, myrank,
                    nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, nr_threads,
@@ -717,7 +756,7 @@ int main(int argc, char *argv[]) {
 #endif
 #else
     read_ic_single(ICfileName, &us, dim, &parts, &gparts, &sparts, &Ngas,
-                   &Ngpart, &Nspart, &periodic, &flag_entropy_ICs, with_hydro,
+                   &Ngpart, &Nspart, &flag_entropy_ICs, with_hydro,
                    (with_external_gravity || with_self_gravity), with_stars,
                    cleanup_h, cleanup_sqrt_a, cosmo.h, cosmo.a, nr_threads,
                    dry_run);
@@ -730,20 +769,11 @@ int main(int argc, char *argv[]) {
       fflush(stdout);
     }
 
-#ifdef WITH_MPI
-    if (periodic && with_self_gravity)
-      error("Periodic self-gravity over MPI temporarily disabled.");
-#endif
-
-#if defined(WITH_MPI) && defined(HAVE_VELOCIRAPTOR)
-    if (with_structure_finding) error("VEOCIraptor not yet enabled over MPI.");
-#endif
-
 #ifdef SWIFT_DEBUG_CHECKS
     /* Check once and for all that we don't have unwanted links */
     if (!with_stars && !dry_run) {
       for (size_t k = 0; k < Ngpart; ++k)
-        if (gparts[k].type == swift_type_star) error("Linking problem");
+        if (gparts[k].type == swift_type_stars) error("Linking problem");
     }
     if (!with_hydro && !dry_run) {
       for (size_t k = 0; k < Ngpart; ++k)
@@ -769,32 +799,18 @@ int main(int argc, char *argv[]) {
 
     if (myrank == 0)
       message(
-          "Read %lld gas particles, %lld star particles and %lld gparts from "
-          "the "
-          "ICs.",
+          "Read %lld gas particles, %lld stars particles and %lld gparts from "
+          "the ICs.",
           N_total[0], N_total[2], N_total[1]);
 
     /* Verify that the fields to dump actually exist */
     if (myrank == 0) io_check_output_fields(params, N_total);
 
-    /* Initialise the long-range gravity mesh */
-    if (with_self_gravity && periodic) {
-#ifdef HAVE_FFTW
-      pm_mesh_init(&mesh, &gravity_properties, dim);
-#else
-      /* Need the FFTW library if periodic and self gravity. */
-      error(
-          "No FFTW library found. Cannot compute periodic long-range forces.");
-#endif
-    } else {
-      pm_mesh_init_no_mesh(&mesh, dim);
-    }
-
     /* Initialize the space with these data. */
     if (myrank == 0) clocks_gettime(&tic);
     space_init(&s, params, &cosmo, dim, parts, gparts, sparts, Ngas, Ngpart,
-               Nspart, periodic, replicate, generate_gas_in_ics,
-               with_self_gravity, talking, dry_run);
+               Nspart, periodic, replicate, generate_gas_in_ics, with_hydro,
+               with_self_gravity, with_star_formation, talking, dry_run);
 
     if (myrank == 0) {
       clocks_gettime(&toc);
@@ -803,6 +819,19 @@ int main(int argc, char *argv[]) {
       fflush(stdout);
     }
 
+    /* Initialise the long-range gravity mesh */
+    if (with_self_gravity && periodic) {
+#ifdef HAVE_FFTW
+      pm_mesh_init(&mesh, &gravity_properties, s.dim, nr_threads);
+#else
+      /* Need the FFTW library if periodic and self gravity. */
+      error(
+          "No FFTW library found. Cannot compute periodic long-range forces.");
+#endif
+    } else {
+      pm_mesh_init_no_mesh(&mesh, s.dim);
+    }
+
     /* Check that the matter content matches the cosmology given in the
      * parameter file. */
     if (with_cosmology && with_self_gravity && !dry_run)
@@ -835,6 +864,18 @@ int main(int argc, char *argv[]) {
       fflush(stdout);
     }
 
+    /* Verify that we are not using basic modes incorrectly */
+    if (with_hydro && N_total[0] == 0) {
+      error(
+          "ERROR: Running with hydrodynamics but no gas particles found in the "
+          "ICs!");
+    }
+    if ((with_self_gravity || with_external_gravity) && N_total[1] == 0) {
+      error(
+          "ERROR: Running with gravity but no gravity particles found in "
+          "the ICs!");
+    }
+
     /* Verify that each particle is in it's proper cell. */
     if (talking && !dry_run) {
       int icount = 0;
@@ -850,22 +891,29 @@ int main(int argc, char *argv[]) {
     }
 
     /* Initialise the external potential properties */
+    bzero(&potential, sizeof(struct external_potential));
     if (with_external_gravity)
       potential_init(params, &prog_const, &us, &s, &potential);
     if (myrank == 0) potential_print(&potential);
 
     /* Initialise the cooling function properties */
-    if (with_cooling) cooling_init(params, &us, &prog_const, &cooling_func);
+    bzero(&cooling_func, sizeof(struct cooling_function_data));
+    if (with_cooling || with_temperature)
+      cooling_init(params, &us, &prog_const, &cooling_func);
     if (myrank == 0) cooling_print(&cooling_func);
 
+    /* Initialise the star formation law and its properties */
+    bzero(&starform, sizeof(struct star_formation));
+    if (with_star_formation)
+      starformation_init(params, &prog_const, &us, &hydro_properties,
+                         &starform);
+    if (with_star_formation && myrank == 0) starformation_print(&starform);
+
     /* Initialise the chemistry */
+    bzero(&chemistry, sizeof(struct chemistry_global_data));
     chemistry_init(params, &us, &prog_const, &chemistry);
     if (myrank == 0) chemistry_print(&chemistry);
 
-    /* Initialise the feedback properties */
-    if (with_sourceterms) sourceterms_init(params, &us, &sourceterms);
-    if (with_sourceterms && myrank == 0) sourceterms_print(&sourceterms);
-
     /* Construct the engine policy */
     int engine_policies = ENGINE_POLICY | engine_policy_steal;
     if (with_drift_all) engine_policies |= engine_policy_drift_all;
@@ -876,9 +924,12 @@ int main(int argc, char *argv[]) {
     if (with_external_gravity)
       engine_policies |= engine_policy_external_gravity;
     if (with_cosmology) engine_policies |= engine_policy_cosmology;
+    if (with_temperature) engine_policies |= engine_policy_temperature;
+    if (with_limiter) engine_policies |= engine_policy_limiter;
     if (with_cooling) engine_policies |= engine_policy_cooling;
-    if (with_sourceterms) engine_policies |= engine_policy_sourceterms;
     if (with_stars) engine_policies |= engine_policy_stars;
+    if (with_star_formation) engine_policies |= engine_policy_star_formation;
+    if (with_feedback) engine_policies |= engine_policy_feedback;
     if (with_structure_finding)
       engine_policies |= engine_policy_structure_finding;
 
@@ -886,8 +937,9 @@ int main(int argc, char *argv[]) {
     if (myrank == 0) clocks_gettime(&tic);
     engine_init(&e, &s, params, N_total[0], N_total[1], N_total[2],
                 engine_policies, talking, &reparttype, &us, &prog_const, &cosmo,
-                &hydro_properties, &gravity_properties, &mesh, &potential,
-                &cooling_func, &chemistry, &sourceterms);
+                &hydro_properties, &entropy_floor, &gravity_properties,
+                &stars_properties, &mesh, &potential, &cooling_func, &starform,
+                &chemistry);
     engine_config(0, &e, params, nr_nodes, myrank, nr_threads, with_aff,
                   talking, restart_file);
 
@@ -902,15 +954,14 @@ int main(int argc, char *argv[]) {
     if (myrank == 0) {
       long long N_DM = N_total[1] - N_total[2] - N_total[0];
       message(
-          "Running on %lld gas particles, %lld star particles and %lld DM "
+          "Running on %lld gas particles, %lld stars particles and %lld DM "
           "particles (%lld gravity particles)",
           N_total[0], N_total[2], N_total[1] > 0 ? N_DM : 0, N_total[1]);
       message(
-          "from t=%.3e until t=%.3e with %d threads and %d queues "
-          "(dt_min=%.3e, "
-          "dt_max=%.3e)...",
-          e.time_begin, e.time_end, e.nr_threads, e.sched.nr_queues, e.dt_min,
-          e.dt_max);
+          "from t=%.3e until t=%.3e with %d ranks, %d threads / rank and %d "
+          "task queues / rank (dt_min=%.3e, dt_max=%.3e)...",
+          e.time_begin, e.time_end, nr_nodes, e.nr_threads, e.sched.nr_queues,
+          e.dt_min, e.dt_max);
       fflush(stdout);
     }
   }
@@ -950,24 +1001,20 @@ int main(int argc, char *argv[]) {
     engine_init_particles(&e, flag_entropy_ICs, clean_smoothing_length_values);
 
     /* Write the state of the system before starting time integration. */
+#ifdef WITH_LOGGER
+    logger_log_all(e.logger, &e);
+    engine_dump_index(&e);
+#endif
     engine_dump_snapshot(&e);
     engine_print_stats(&e);
 
     /* Is there a dump before the end of the first time-step? */
     engine_check_for_dumps(&e);
-
-#ifdef HAVE_VELOCIRAPTOR
-    /* Call VELOCIraptor for the first time after the first snapshot dump. */
-    // if (e.policy & engine_policy_structure_finding) {
-    // velociraptor_init(&e);
-    // velociraptor_invoke(&e);
-    //}
-#endif
   }
 
   /* Legend */
   if (myrank == 0) {
-    printf("# %6s %14s %14s %10s %14s %9s %12s %12s %12s %16s [%s] %6s\n",
+    printf("# %6s %14s %12s %12s %14s %9s %12s %12s %12s %16s [%s] %6s\n",
            "Step", "Time", "Scale-factor", "Redshift", "Time-step", "Time-bins",
            "Updates", "g-Updates", "s-Updates", "Wall-clock time",
            clocks_getunit(), "Props");
@@ -991,7 +1038,7 @@ int main(int argc, char *argv[]) {
 
   /* Main simulation loop */
   /* ==================== */
-  int force_stop = 0;
+  int force_stop = 0, resubmit = 0;
   for (int j = 0; !engine_is_done(&e) && e.step - 1 != nsteps && !force_stop;
        j++) {
 
@@ -1012,97 +1059,29 @@ int main(int argc, char *argv[]) {
         message("Forcing application exit, dumping restart files...");
     }
 
+    /* Did we exceed the maximal runtime? */
+    if (clocks_get_hours_since_start() > restart_max_hours_runtime) {
+      force_stop = 1;
+      message("Runtime limit reached, dumping restart files...");
+      if (resubmit_after_max_hours) resubmit = 1;
+    }
+
     /* Also if using nsteps to exit, will not have saved any restarts on exit,
      * make sure we do that (useful in testing only). */
     if (force_stop || (e.restart_onexit && e.step - 1 == nsteps))
       engine_dump_restarts(&e, 0, 1);
 
-#ifdef SWIFT_DEBUG_TASKS
     /* Dump the task data using the given frequency. */
     if (dump_tasks && (dump_tasks == 1 || j % dump_tasks == 1)) {
-#ifdef WITH_MPI
-
-      /* Make sure output file is empty, only on one rank. */
-      char dumpfile[30];
-      snprintf(dumpfile, 30, "thread_info_MPI-step%d.dat", j + 1);
-      FILE *file_thread;
-      if (myrank == 0) {
-        file_thread = fopen(dumpfile, "w");
-        fclose(file_thread);
-      }
-      MPI_Barrier(MPI_COMM_WORLD);
-
-      for (int i = 0; i < nr_nodes; i++) {
-
-        /* Rank 0 decides the index of writing node, this happens one-by-one. */
-        int kk = i;
-        MPI_Bcast(&kk, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-        if (i == myrank) {
-
-          /* Open file and position at end. */
-          file_thread = fopen(dumpfile, "a");
-
-          fprintf(file_thread,
-                  " %03d 0 0 0 0 %lld %lld %lld %lld %lld 0 0 %lld\n", myrank,
-                  e.tic_step, e.toc_step, e.updates, e.g_updates, e.s_updates,
-                  cpufreq);
-          int count = 0;
-          for (int l = 0; l < e.sched.nr_tasks; l++) {
-            if (!e.sched.tasks[l].implicit && e.sched.tasks[l].toc != 0) {
-              fprintf(
-                  file_thread,
-                  " %03i %i %i %i %i %lli %lli %i %i %i %i %i %i\n", myrank,
-                  e.sched.tasks[l].rid, e.sched.tasks[l].type,
-                  e.sched.tasks[l].subtype, (e.sched.tasks[l].cj == NULL),
-                  e.sched.tasks[l].tic, e.sched.tasks[l].toc,
-                  (e.sched.tasks[l].ci != NULL) ? e.sched.tasks[l].ci->count
-                                                : 0,
-                  (e.sched.tasks[l].cj != NULL) ? e.sched.tasks[l].cj->count
-                                                : 0,
-                  (e.sched.tasks[l].ci != NULL) ? e.sched.tasks[l].ci->gcount
-                                                : 0,
-                  (e.sched.tasks[l].cj != NULL) ? e.sched.tasks[l].cj->gcount
-                                                : 0,
-                  e.sched.tasks[l].flags, e.sched.tasks[l].sid);
-            }
-            fflush(stdout);
-            count++;
-          }
-          fclose(file_thread);
-        }
-
-        /* And we wait for all to synchronize. */
-        MPI_Barrier(MPI_COMM_WORLD);
-      }
+#ifdef SWIFT_DEBUG_TASKS
+      task_dump_all(&e, j + 1);
+#endif
 
-#else
-      char dumpfile[30];
-      snprintf(dumpfile, 30, "thread_info-step%d.dat", j + 1);
-      FILE *file_thread;
-      file_thread = fopen(dumpfile, "w");
-      /* Add some information to help with the plots */
-      fprintf(file_thread, " %d %d %d %d %lld %lld %lld %lld %lld %d %lld\n",
-              -2, -1, -1, 1, e.tic_step, e.toc_step, e.updates, e.g_updates,
-              e.s_updates, 0, cpufreq);
-      for (int l = 0; l < e.sched.nr_tasks; l++) {
-        if (!e.sched.tasks[l].implicit && e.sched.tasks[l].toc != 0) {
-          fprintf(
-              file_thread, " %i %i %i %i %lli %lli %i %i %i %i %i\n",
-              e.sched.tasks[l].rid, e.sched.tasks[l].type,
-              e.sched.tasks[l].subtype, (e.sched.tasks[l].cj == NULL),
-              e.sched.tasks[l].tic, e.sched.tasks[l].toc,
-              (e.sched.tasks[l].ci == NULL) ? 0 : e.sched.tasks[l].ci->count,
-              (e.sched.tasks[l].cj == NULL) ? 0 : e.sched.tasks[l].cj->count,
-              (e.sched.tasks[l].ci == NULL) ? 0 : e.sched.tasks[l].ci->gcount,
-              (e.sched.tasks[l].cj == NULL) ? 0 : e.sched.tasks[l].cj->gcount,
-              e.sched.tasks[l].sid);
-        }
-      }
-      fclose(file_thread);
-#endif  // WITH_MPI
+      /* Generate the task statistics. */
+      char dumpfile[40];
+      snprintf(dumpfile, 40, "thread_stats-step%d.dat", j + 1);
+      task_dump_stats(dumpfile, &e, /* header = */ 0, /* allranks = */ 1);
     }
-#endif  // SWIFT_DEBUG_TASKS
 
 #ifdef SWIFT_DEBUG_THREADPOOL
     /* Dump the task data using the given frequency. */
@@ -1137,33 +1116,44 @@ int main(int argc, char *argv[]) {
 
     /* Print some information to the screen */
     printf(
-        "  %6d %14e %14e %10.5f %14e %4d %4d %12lld %12lld %12lld %21.3f %6d\n",
+        "  %6d %14e %12.7f %12.7f %14e %4d %4d %12lld %12lld %12lld %21.3f "
+        "%6d\n",
         e.step, e.time, e.cosmology->a, e.cosmology->z, e.time_step,
         e.min_active_bin, e.max_active_bin, e.updates, e.g_updates, e.s_updates,
         e.wallclock_time, e.step_props);
     fflush(stdout);
 
-    fprintf(
-        e.file_timesteps,
-        "  %6d %14e %14e %10.5f %14e %4d %4d %12lld %12lld %12lld %21.3f %6d\n",
-        e.step, e.time, e.cosmology->a, e.cosmology->z, e.time_step,
-        e.min_active_bin, e.max_active_bin, e.updates, e.g_updates, e.s_updates,
-        e.wallclock_time, e.step_props);
+    fprintf(e.file_timesteps,
+            "  %6d %14e %12.7f %12.7f %14e %4d %4d %12lld %12lld %12lld %21.3f "
+            "%6d\n",
+            e.step, e.time, e.cosmology->a, e.cosmology->z, e.time_step,
+            e.min_active_bin, e.max_active_bin, e.updates, e.g_updates,
+            e.s_updates, e.wallclock_time, e.step_props);
     fflush(e.file_timesteps);
   }
 
   /* Write final output. */
-  engine_drift_all(&e);
-  engine_print_stats(&e);
-  engine_dump_snapshot(&e);
+  if (!force_stop) {
+    engine_drift_all(&e, /*drift_mpole=*/0);
+    engine_print_stats(&e);
+#ifdef WITH_LOGGER
+    logger_log_all(e.logger, &e);
+    engine_dump_index(&e);
+#endif
 
 #ifdef HAVE_VELOCIRAPTOR
-  /* Call VELOCIraptor at the end of the run to find groups. */
-  if (e.policy & engine_policy_structure_finding) {
-    velociraptor_init(&e);
-    velociraptor_invoke(&e);
-  }
+    if (with_structure_finding && e.snapshot_invoke_stf)
+      velociraptor_invoke(&e, /*linked_with_snap=*/1);
+#endif
+
+    /* write a final snapshot */
+    engine_dump_snapshot(&e);
+
+#ifdef HAVE_VELOCIRAPTOR
+    if (with_structure_finding && e.snapshot_invoke_stf)
+      free(e.s->gpart_group_data);
 #endif
+  }
 
 #ifdef WITH_MPI
   if ((res = MPI_Finalize()) != MPI_SUCCESS)
@@ -1174,10 +1164,20 @@ int main(int argc, char *argv[]) {
    * stop file if normal exit happened first. */
   if (myrank == 0) force_stop = restart_stop_now(restart_dir, 1);
 
+  /* Did we want to run a re-submission command just before dying? */
+  if (myrank == 0 && resubmit) {
+    message("Running the resubmission command:");
+    restart_resubmit(resubmit_command);
+    fflush(stdout);
+    fflush(stderr);
+    message("resubmission command completed.");
+  }
+
   /* Clean everything */
   if (with_verbose_timers) timers_close_file();
-  if (with_cosmology) cosmology_clean(&cosmo);
-  if (with_self_gravity) pm_mesh_clean(&mesh);
+  if (with_cosmology) cosmology_clean(e.cosmology);
+  if (with_self_gravity) pm_mesh_clean(e.mesh);
+  if (with_cooling || with_temperature) cooling_clean(&cooling_func);
   engine_clean(&e);
   free(params);
 
diff --git a/examples/parameter_example.yml b/examples/parameter_example.yml
index 5fb48eb17d1c210d2a320917e1fd5d1ad67ddd94..22bbf3db4f4f49f1cce6c1aa817b8228f829437f 100644
--- a/examples/parameter_example.yml
+++ b/examples/parameter_example.yml
@@ -28,11 +28,22 @@ SPH:
   CFL_condition:         0.1      # Courant-Friedrich-Levy condition for time integration.
   h_tolerance:           1e-4     # (Optional) Relative accuracy of the Netwon-Raphson scheme for the smoothing lengths.
   h_max:                 10.      # (Optional) Maximal allowed smoothing length in internal units. Defaults to FLT_MAX if unspecified.
+  h_min_ratio:           0.       # (Optional) Minimal allowed smoothing length in units of the softening. Defaults to 0 if unspecified.
   max_volume_change:     1.4      # (Optional) Maximal allowed change of kernel volume over one time-step.
   max_ghost_iterations:  30       # (Optional) Maximal number of iterations allowed to converge towards the smoothing length.
   initial_temperature:   0        # (Optional) Initial temperature (in internal units) to set the gas particles at start-up. Value is ignored if set to 0.
   minimal_temperature:   0        # (Optional) Minimal temperature (in internal units) allowed for the gas particles. Value is ignored if set to 0.
-  H_mass_fraction:       0.76     # (Optional) Hydrogen mass fraction used for initial conversion from temp to internal energy.
+  H_mass_fraction:       0.755    # (Optional) Hydrogen mass fraction used for initial conversion from temp to internal energy. Default value is derived from the physical constants.
+  H_ionization_temperature: 1e4   # (Optional) Temperature of the transition from neutral to ionized Hydrogen for primordial gas.
+  viscosity_alpha:       0.8      # (Optional) Override for the initial value of the artificial viscosity. In schemes that have a fixed AV, this remains as alpha throughout the run.
+  viscosity_alpha_max:   2.0      # (Optional) Maximal value for the artificial viscosity in schemes that allow alpha to vary.
+  viscosity_alpha_min:   0.1      # (Optional) Minimal value for the artificial viscosity in schemes that allow alpha to vary.
+  viscosity_length:      0.1      # (Optional) Decay length for the artificial viscosity in schemes that allow alpha to vary.
+  diffusion_alpha:       0.0      # (Optional) Override the initial value for the thermal diffusion coefficient in schemes with thermal diffusion.
+  diffusion_beta:        0.01     # (Optional) Override the decay/rise rate tuning parameter for the thermal diffusion.
+  diffusion_alpha_max:   1.0      # (Optional) Override the maximal thermal diffusion coefficient that is allowed for a given particle.
+  diffusion_alpha_min:   0.0      # (Optional) Override the minimal thermal diffusion coefficient that is allowed for a given particle.
+
 
 # Parameters for the self-gravity scheme
 Gravity:
@@ -55,9 +66,13 @@ Scheduler:
   cell_sub_size_pair_grav:   256000000 # (Optional) Maximal number of interactions per sub-pair gravity task  (this is the default value).
   cell_sub_size_self_grav:   32000     # (Optional) Maximal number of interactions per sub-self gravity task  (this is the default value).
   cell_split_size:           400       # (Optional) Maximal number of particles per cell (this is the default value).
-  cell_subdepth_grav:        2         # (Optional) Maximal depth the gravity tasks can be pushed down (this is the default value).
+  cell_subdepth_diff_grav:   4         # (Optional) Maximal depth difference between leaves and a cell that gravity tasks can be pushed down to (this is the default value).
+  cell_extra_parts:          0         # (Optional) Number of spare parts per top-level allocated at rebuild time for on-the-fly creation.
+  cell_extra_gparts:         0         # (Optional) Number of spare gparts per top-level allocated at rebuild time for on-the-fly creation.
+  cell_extra_sparts:         400       # (Optional) Number of spare sparts per top-level allocated at rebuild time for on-the-fly creation.
   max_top_level_cells:       12        # (Optional) Maximal number of top-level cells in any dimension. The number of top-level cells will be the cube of this (this is the default value).
-  tasks_per_cell:            0         # (Optional) The average number of tasks per cell. If not large enough the simulation will fail (means guess...).
+  tasks_per_cell:            0.0       # (Optional) The average number of tasks per cell. If not large enough the simulation will fail (means guess...).
+  links_per_tasks:           10        # (Optional) The average number of links per tasks (before adding the communication tasks). If not large enough the simulation will fail (means guess...). Defaults to 10.
   mpi_message_limit:         4096      # (Optional) Maximum MPI task message size to send non-buffered, KB.
 
 # Parameters governing the time integration (Set dt_min and dt_max to the same value for a fixed time-step run.)
@@ -74,9 +89,9 @@ Snapshots:
   scale_factor_first: 0.1 # (Optional) Scale-factor of the first snapshot if cosmological time-integration.
   time_first: 0.          # (Optional) Time of the first output if non-cosmological time-integration (in internal units)
   delta_time: 0.01        # Time difference between consecutive outputs (in internal units)
+  invoke_stf: 0           # (Optional) Call VELOCIraptor every time a snapshot is written irrespective of the VELOCIraptor output strategy.
   compression: 0          # (Optional) Set the level of compression of the HDF5 datasets [0-9]. 0 does no compression.
-  label_first: 0          # (Optional) An additional offset for the snapshot output label
-  label_delta: 1          # (Optional) Set the integer increment between snapshot output labels
+  int_time_label_on:   0  # (Optional) Enable to label the snapshots using the time rounded to an integer (in internal units)
   UnitMass_in_cgs:     1  # (Optional) Unit system for the outputs (Grams)
   UnitLength_in_cgs:   1  # (Optional) Unit system for the outputs (Centimeters)
   UnitVelocity_in_cgs: 1  # (Optional) Unit system for the outputs (Centimeters per second)
@@ -85,6 +100,13 @@ Snapshots:
   output_list_on:      0  # (Optional) Enable the output list
   output_list:         snaplist.txt # (Optional) File containing the output times (see documentation in "Parameter File" section)
 
+# Parameters governing the logger snapshot system
+Logger:
+  delta_step:           10     # Update the particle log every this many updates
+  initial_buffer_size:  1      # buffer size in GB
+  buffer_scale:		10     # (Optional) When buffer size is too small, update it with required memory times buffer_scale
+  basename:             index  # Common part of the filenames
+  
 # Parameters governing the conserved quantities statistics
 Statistics:
   delta_time:           1e-2     # Time between statistics output
@@ -98,6 +120,7 @@ Statistics:
 # Parameters related to the initial conditions
 InitialConditions:
   file_name:  SedovBlast/sedov.hdf5 # The file to read
+  periodic:                    1    # Are we running with periodic ICs?
   generate_gas_in_ics:         0    # (Optional) Generate gas particles from the DM-only ICs (e.g. from panphasia).
   cleanup_h_factors:           0    # (Optional) Clean up the h-factors used in the ICs (e.g. in Gadget files).
   cleanup_velocity_factors:    0    # (Optional) Clean up the scale-factors used in the definition of the velocity variable in the ICs (e.g. in Gadget files).
@@ -108,30 +131,47 @@ InitialConditions:
 
 # Parameters controlling restarts
 Restarts:
-  enable:      1        # (Optional) whether to enable dumping restarts at fixed intervals.
-  save:        1        # (Optional) whether to save copies of the previous set of restart files (named .prev)
-  onexit:      0        # (Optional) whether to dump restarts on exit (*needs enable*)
-  subdir:      restart  # (Optional) name of subdirectory for restart files.
-  basename:    swift    # (Optional) prefix used in naming restart files.
-  delta_hours: 6.0      # (Optional) decimal hours between dumps of restart files.
-  stop_steps:  100      # (Optional) how many steps to process before checking if the <subdir>/stop file exists. When present the application will attempt to exit early, dumping restart files first.
+  enable:             1          # (Optional) whether to enable dumping restarts at fixed intervals.
+  save:               1          # (Optional) whether to save copies of the previous set of restart files (named .prev)
+  onexit:             0          # (Optional) whether to dump restarts on exit (*needs enable*)
+  subdir:             restart    # (Optional) name of subdirectory for restart files.
+  basename:           swift      # (Optional) prefix used in naming restart files.
+  delta_hours:        6.0        # (Optional) decimal hours between dumps of restart files.
+  stop_steps:         100        # (Optional) how many steps to process before checking if the <subdir>/stop file exists. When present the application will attempt to exit early, dumping restart files first.
+  max_run_time:       24.0       # (Optional) Maximal wall-clock time in hours. The application will exit when this limit is reached.
+  resubmit_on_exit:   0          # (Optional) whether to run a command when exiting after the time limit has been reached.
+  resubmit_command:   ./resub.sh # (Optional) Command to run when time limit is reached. Compulsory if resubmit_on_exit is switched on. Note potentially unsafe.
 
 # Parameters governing domain decomposition
 DomainDecomposition:
-  initial_type:     simple_metis # (Optional) The initial decomposition strategy: "grid",
-                                 #            "simple_metis", "weighted_metis", or "vectorized".
-  initial_grid: [10,10,10] # (Optional) Grid sizes if the "grid" strategy is chosen.
+  initial_type:     memory    # (Optional) The initial decomposition strategy: "grid",
+                              #            "region", "memory", or "vectorized".
+  initial_grid: [10,10,10]    # (Optional) Grid sizes if the "grid" strategy is chosen.
 
-  repartition_type: costs/costs # (Optional) The re-decomposition strategy, one of:
-                            # "none/none", "costs/costs", "counts/none", "none/costs", "counts/costs",
-                            # "costs/time" or "none/time".
-                            # These are vertex/edge weights with "costs" as task timing, "counts" as
-                            # sum of particles and "time" as the expected time of the next updates
+  repartition_type: fullcosts # (Optional) The re-decomposition strategy, one of:
+                              # "none", "fullcosts", "edgecosts", "memory" or
+                              # "timecosts".
+  trigger:          0.05      # (Optional) Fractional (<1) CPU time difference between MPI ranks required to trigger a
+                              # new decomposition, or number of steps (>1) between decompositions
+  minfrac:          0.9       # (Optional) Fractional of all particles that should be updated in previous step when
+                              # using CPU time trigger
+  usemetis:         0         # Use serial METIS when ParMETIS is also available.
+  adaptive:         1         # Use adaptive repartition when ParMETIS is available, otherwise simple refinement.
+  itr:              100       # When adaptive defines the ratio of inter node communication time to data redistribution time, in the range 0.00001 to 10000000.0.
+                              # Lower values give less data movement during redistributions, at the cost of global balance which may require more communication.
+  use_fixed_costs:  0         # If 1 then use any compiled in fixed costs for
+                              # task weights in first repartition, if 0 only use task timings, if > 1 only use
+                              # fixed costs, unless none are available.
 
-  trigger:          0.05    # (Optional) Fractional (<1) CPU time difference between MPI ranks required to trigger a
-                            # new decomposition, or number of steps (>1) between decompositions
-  minfrac:          0.9     # (Optional) Fractional of all particles that should be updated in previous step when
-                            # using CPU time trigger
+# Structure finding options (requires velociraptor)
+StructureFinding:
+  config_file_name:     stf_input.cfg # Name of the STF config file.
+  basename:             ./stf         # Common part of the name of output files.
+  scale_factor_first:   0.92          # (Optional) Scale-factor of the first snapshot (cosmological run)
+  time_first:           0.01          # (Optional) Time of the first structure finding output (in internal units).
+  delta_time:           1.10          # (Optional) Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
+  output_list_on:       0   	      # (Optional) Enable the output list
+  output_list:          stflist.txt   # (Optional) File containing the output times (see documentation in "Parameter File" section)
 
 # Parameters related to the equation of state ------------------------------------------
 
@@ -143,18 +183,19 @@ EoS:
   planetary_use_ANEOS:  0   # (Optional) Whether to prepare the ANEOS EOS
   planetary_use_SESAME: 0   # (Optional) Whether to prepare the SESAME EOS
                             # (Optional) Table file paths
-  planetary_HM80_HHe_table_file:        ./equation_of_state/planetary_HM80_HHe.txt
-  planetary_HM80_ice_table_file:        ./equation_of_state/planetary_HM80_ice.txt
-  planetary_HM80_rock_table_file:       ./equation_of_state/planetary_HM80_rock.txt
-  planetary_SESAME_iron_table_file:     ./equation_of_state/planetary_SESAME_iron_2140.txt
-  planetary_SESAME_basalt_table_file:   ./equation_of_state/planetary_SESAME_basalt_7530.txt
-  planetary_SESAME_water_table_file:    ./equation_of_state/planetary_SESAME_water_7154.txt
-  planetary_SS08_water_table_file:      ./equation_of_state/planetary_SS08_water.txt
+  planetary_HM80_HHe_table_file:        ./EoSTables/planetary_HM80_HHe.txt
+  planetary_HM80_ice_table_file:        ./EoSTables/planetary_HM80_ice.txt
+  planetary_HM80_rock_table_file:       ./EoSTables/planetary_HM80_rock.txt
+  planetary_SESAME_iron_table_file:     ./EoSTables/planetary_SESAME_iron_2140.txt
+  planetary_SESAME_basalt_table_file:   ./EoSTables/planetary_SESAME_basalt_7530.txt
+  planetary_SESAME_water_table_file:    ./EoSTables/planetary_SESAME_water_7154.txt
+  planetary_SS08_water_table_file:      ./EoSTables/planetary_SS08_water.txt
 
 # Parameters related to external potentials --------------------------------------------
 
 # Point mass external potentials
 PointMassPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
   position:        [50.,50.0,50.]      # location of external point mass (internal units)
   mass:            1e10                # mass of external point mass (internal units)
   timestep_mult:   0.03                # Dimensionless pre-factor for the time-step condition
@@ -162,10 +203,38 @@ PointMassPotential:
 
 # Isothermal potential parameters
 IsothermalPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
   position:        [100.,100.,100.]    # Location of centre of isothermal potential with respect to centre of the box (internal units)
   vrot:            200.     # Rotation speed of isothermal potential (internal units)
   timestep_mult:   0.03     # Dimensionless pre-factor for the time-step condition
   epsilon:         0.1      # Softening size (internal units)
+  
+# Hernquist potential parameters
+HernquistPotential:
+  useabspos:       0        # 0 -> positions based on centre, 1 -> absolute positions 
+  position:        [100.,100.,100.]    # Location of centre of isothermal potential with respect to centre of the box (if 0) otherwise absolute (if 1) (internal units)
+  idealizeddisk:   0        # (Optional) Whether to run with idealizeddisk or without, 0 uses the mass and scalelength as mandatory parameters, while 1 uses more advanced disk-dependent parameters
+  mass:            1e10     # (Optional 0) default parameter, Mass of the Hernquist potential
+  scalelength:     10.0     # (Optional 0) default parameter, Scale length of the potential
+                            # If multiple X200 values are given, only one is used, in the order M200 > V200 > R200.
+  M200:            3e11     # (Optional 1a) M200 of the galaxy+halo (when used V200 and R200 are not used)
+  V200:            100.     # (Optional 1b) V200 of the galaxy+halo (when used M200 and R200 are not used, if M200 is given M200 is used)
+  R200:            10.      # (Optional 1c) R200 of the galaxy+halo (when used M200 and V200 are not used, if M200 or V200 are given they are used)
+  h:               0.704    # (Optional 1) reduced Hubble constant
+  concentration:   7.1      # (Optional 1) concentration of the Halo
+  diskfraction:              0.0434370991372   # (Optional 1) Disk mass fraction (equal to MD in MakeNewDisk and GalIC)
+  bulgefraction:              0.00705852860979  # (Optional 1) Bulge mass fraction (equal to MB in MakeNewDisk and GalIC)
+  timestep_mult:   0.01     # Dimensionless pre-factor for the time-step condition, basically determines the fraction of the orbital time we use to do the time integration
+  epsilon:         0.1      # Softening size (internal units)
+ 
+# Isothermal potential parameters
+NFWPotential:
+  useabspos:          0
+  position:           [0.0,0.0,0.0]      # Location of centre of isothermal potential with respect to centre of the box (internal units) if useabspos=0 otherwise with respect to the 0,0,0, coordinates.
+  concentration:      8.       # Concentration of the halo
+  M_200:              2.0e+12  # Mass of the halo (M_200 in internal units)
+  critical_density:   127.4    # Critical density (internal units).
+  timestep_mult:      0.01     # Dimensionless pre-factor for the time-step condition, basically determines fraction of orbital time we need to do an integration step
 
 # Disk-patch potential parameters
 DiscPatchPotential:
@@ -183,6 +252,18 @@ SineWavePotential:
   timestep_limit:   1.      # Time-step dimensionless pre-factor.
   growth_time:      0.      # (Optional) Time for the potential to grow to its final size.
 
+# Parameters related to entropy floors    ----------------------------------------------
+
+EAGLEEntropyFloor:
+  Jeans_density_threshold_H_p_cm3: 0.1       # Physical density above which the EAGLE Jeans limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Jeans_over_density_threshold:    10.       # Overdensity above which the EAGLE Jeans limiter entropy floor can kick in.
+  Jeans_temperature_norm_K:        8000      # Temperature of the EAGLE Jeans limiter entropy floor at the density threshold expressed in Kelvin.
+  Jeans_gamma_effective:           1.3333333 # Slope of the EAGLE Jeans limiter entropy floor
+  Cool_density_threshold_H_p_cm3:  1e-5      # Physical density above which the EAGLE Cool limiter entropy floor kicks in expressed in Hydrogen atoms per cm^3.
+  Cool_over_density_threshold:     10.       # Overdensity above which the EAGLE Cool limiter entropy floor can kick in.
+  Cool_temperature_norm_K:         8000      # Temperature of the EAGLE Cool limiter entropy floor at the density threshold expressed in Kelvin.
+  Cool_gamma_effective:            1.        # Slope of the EAGLE Cool limiter entropy floor
+  
 # Parameters related to cooling function  ----------------------------------------------
 
 # Constant du/dt cooling function
@@ -193,12 +274,20 @@ ConstCooling:
 
 # Constant lambda cooling function
 LambdaCooling:
-  lambda:                      2.0   # Cooling rate (in cgs units)
-  minimum_temperature:         1.0e4 # Minimal temperature (Kelvin)
-  mean_molecular_weight:       0.59  # Mean molecular weight
-  hydrogen_mass_abundance:     0.75  # Hydrogen mass abundance (dimensionless)
-  cooling_tstep_mult:          1.0   # Dimensionless pre-factor for the time-step condition
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
+  cooling_tstep_mult:          1.0   # (Optional) Dimensionless pre-factor for the time-step condition.
 
+# Parameters of the EAGLE cooling model (Wiersma+08 cooling tables).
+EAGLECooling:
+  dir_name:                  ./coolingtables/  # Location of the Wiersma+08 cooling tables
+  H_reion_z:                 8.5               # Redshift of Hydrogen re-ionization
+  He_reion_z_centre:         3.5               # Redshift of the centre of the Helium re-ionization Gaussian
+  He_reion_z_sigma:          0.5               # Spread in redshift of the  Helium re-ionization Gaussian
+  He_reion_eV_p_H:           2.0               # Energy inject by Helium re-ionization in electron-volt per Hydrogen atom
+  newton_integration:        0                 # (Optional) Set to 1 to use the Newton-Raphson method to solve the xplicit cooling problem.
+  Ca_over_Si_in_solar:       1.                # (Optional) Ratio of Ca/Si to use in units of solar. If set to 1, the code uses [Ca/Si] = 0, i.e. Ca/Si = 0.0941736.
+  S_over_Si_in_solar:        1.                # (Optional) Ratio of S/Si to use in units of solar. If set to 1, the code uses [S/Si] = 0, i.e. S/Si = 0.6054160.
+  
 # Cooling with Grackle 3.0
 GrackleCooling:
   CloudyTable: CloudyData_UVB=HM2012.h5 # Name of the Cloudy Table (available on the grackle bitbucket repository)
@@ -208,7 +297,6 @@ GrackleCooling:
   ProvideVolumetricHeatingRates: 0      # (optional) User provide volumetric heating rates
   ProvideSpecificHeatingRates: 0        # (optional) User provide specific heating rates
   SelfShieldingMethod: 0                # (optional) Grackle (<= 3) or Gear self shielding method
-  OutputMode: 0                         # (optional) Write in output corresponding primordial chemistry mode
   MaxSteps: 10000                       # (optional) Max number of step when computing the initial composition
   ConvergenceLimit: 1e-2                # (optional) Convergence threshold (relative) for initial composition
 
@@ -216,27 +304,34 @@ GrackleCooling:
 
 # EAGLE model
 EAGLEChemistry:
-  InitMetallicity:         0.           # Inital fraction of particle mass in *all* metals
-  InitAbundance_Hydrogen:  0.752        # Inital fraction of particle mass in Hydrogen
-  InitAbundance_Helium:    0.248        # Inital fraction of particle mass in Helium
-  InitAbundance_Carbon:    0.000        # Inital fraction of particle mass in Carbon
-  InitAbundance_Nitrogen:  0.000        # Inital fraction of particle mass in Nitrogen
-  InitAbundance_Oxygen:    0.000        # Inital fraction of particle mass in Oxygen
-  InitAbundance_Neon:      0.000        # Inital fraction of particle mass in Neon
-  InitAbundance_Magnesium: 0.000        # Inital fraction of particle mass in Magnesium
-  InitAbundance_Silicon:   0.000        # Inital fraction of particle mass in Silicon
-  InitAbundance_Iron:      0.000        # Inital fraction of particle mass in Iron
-  CalciumOverSilicon:      0.0941736    # Constant ratio of Calcium over Silicon abundance
-  SulphurOverSilicon:      0.6054160    # Constant ratio of Sulphur over Silicon abundance
+  init_abundance_metal:     0.           # Inital fraction of particle mass in *all* metals
+  init_abundance_Hydrogen:  0.752        # Inital fraction of particle mass in Hydrogen
+  init_abundance_Helium:    0.248        # Inital fraction of particle mass in Helium
+  init_abundance_Carbon:    0.000        # Inital fraction of particle mass in Carbon
+  init_abundance_Nitrogen:  0.000        # Inital fraction of particle mass in Nitrogen
+  init_abundance_Oxygen:    0.000        # Inital fraction of particle mass in Oxygen
+  init_abundance_Neon:      0.000        # Inital fraction of particle mass in Neon
+  init_abundance_Magnesium: 0.000        # Inital fraction of particle mass in Magnesium
+  init_abundance_Silicon:   0.000        # Inital fraction of particle mass in Silicon
+  init_abundance_Iron:      0.000        # Inital fraction of particle mass in Iron
 
-# Structure finding options (requires velociraptor)
-StructureFinding:
-  config_file_name:     stf_input.cfg # Name of the STF config file.
-  basename:             ./stf         # Common part of the name of output files.
-  output_time_format:   0             # Specifies the frequency format of structure finding. 0 for simulation steps (delta_step) and 1 for simulation time intervals (delta_time).
-  scale_factor_first:   0.92          # Scale-factor of the first snaphot (cosmological run)
-  time_first:           0.01          # Time of the first structure finding output (in internal units).
-  delta_step:           1000          # Time difference between consecutive structure finding outputs (in internal units) in simulation steps.
-  delta_time:           1.10          # Time difference between consecutive structure finding outputs (in internal units) in simulation time intervals.
-  output_list_on:      0   	      # (Optional) Enable the output list
-  output_list:         stflist.txt    # (Optional) File containing the output times (see documentation in "Parameter File" section)
+# Parameters related to star formation models  -----------------------------------------------
+
+# EAGLE star formation model (Schaye and Dalla Vecchia 2008)
+EAGLEStarFormation:
+  EOS_density_norm_H_p_cm3:          0.1       # Physical density used for the normalisation of the EOS assumed for the star-forming gas in Hydrogen atoms per cm^3.
+  EOS_temperature_norm_K:            8000      # Temperature om the polytropic EOS assumed for star-forming gas at the density normalisation in Kelvin.
+  EOS_gamma_effective:               1.3333333 # Slope the of the polytropic EOS assumed for the star-forming gas.
+  gas_fraction:                      0.25      # (Optional) The gas fraction used internally by the model (Defaults to 1).
+  KS_normalisation:                  1.515e-4  # The normalization of the Kennicutt-Schmidt law in Msun / kpc^2 / yr.
+  KS_exponent:                       1.4       # The exponent of the Kennicutt-Schmidt law.
+  KS_min_over_density:               57.7      # The over-density above which star-formation is allowed.
+  KS_high_density_threshold_H_p_cm3: 1e3       # Hydrogen number density above which the Kennicut-Schmidt law changes slope in Hydrogen atoms per cm^3.
+  KS_high_density_exponent:          2.0       # Slope of the Kennicut-Schmidt law above the high-density threshold.
+  KS_max_density_threshold_H_p_cm3:  1e5       # (Optional) Density above which a gas particle gets automatically turned into a star in Hydrogen atoms per cm^3 (Defaults to FLT_MAX).
+  KS_temperature_margin_dex:         0.5       # (Optional) Logarithm base 10 of the maximal temperature difference above the EOS allowed to form stars (Defaults to FLT_MAX).
+  threshold_norm_H_p_cm3:            0.1       # Normalisation of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.
+  threshold_Z0:                      0.002     # Reference metallicity (metal mass fraction) for the metal-dependant threshold for star formation.
+  threshold_slope:                   -0.64     # Slope of the metal-dependant star formation threshold
+  threshold_max_density_H_p_cm3:     10.0      # Maximal density of the metal-dependant density threshold for star formation in Hydrogen atoms per cm^3.  
+  
diff --git a/examples/plot_gravity_checks.py b/examples/plot_gravity_checks.py
deleted file mode 100755
index 23866ac2a6952ff918dbc80533269c0d2e9bcbc5..0000000000000000000000000000000000000000
--- a/examples/plot_gravity_checks.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import glob
-import re
-import numpy as np
-import matplotlib.pyplot as plt
-
-params = {'axes.labelsize': 14,
-'axes.titlesize': 18,
-'font.size': 12,
-'legend.fontsize': 12,
-'xtick.labelsize': 14,
-'ytick.labelsize': 14,
-'text.usetex': True,
-'figure.figsize': (12, 10),
-'figure.subplot.left'    : 0.06,
-'figure.subplot.right'   : 0.99  ,
-'figure.subplot.bottom'  : 0.06  ,
-'figure.subplot.top'     : 0.99  ,
-'figure.subplot.wspace'  : 0.14  ,
-'figure.subplot.hspace'  : 0.14  ,
-'lines.markersize' : 6,
-'lines.linewidth' : 3.,
-'text.latex.unicode': True
-}
-plt.rcParams.update(params)
-plt.rc('font',**{'family':'sans-serif','sans-serif':['Times']})
-
-min_error = 1e-7
-max_error = 3e-1
-num_bins = 64
-
-# Construct the bins
-bin_edges = np.linspace(np.log10(min_error), np.log10(max_error), num_bins + 1)
-bin_size = (np.log10(max_error) - np.log10(min_error)) / num_bins
-bins = 0.5*(bin_edges[1:] + bin_edges[:-1])
-bin_edges = 10**bin_edges
-bins = 10**bins
-
-# Colours
-cols = ['#332288', '#88CCEE', '#117733', '#DDCC77', '#CC6677']
-
-# Time-step to plot
-step = int(sys.argv[1])
-periodic = int(sys.argv[2])
-
-# Find the files for the different expansion orders
-order_list = glob.glob("gravity_checks_swift_step%.4d_order*.dat"%step)
-num_order = len(order_list)
-
-# Get the multipole orders
-order = np.zeros(num_order)
-for i in range(num_order):
-    order[i] = int(order_list[i][35])
-order = sorted(order)
-order_list = sorted(order_list)
-
-# Read the exact accelerations first
-if periodic:
-    data = np.loadtxt('gravity_checks_exact_periodic_step%.4d.dat'%step)
-else:
-    data = np.loadtxt('gravity_checks_exact_step%.4d.dat'%step)
-exact_ids = data[:,0]
-exact_pos = data[:,1:4]
-exact_a = data[:,4:7]
-exact_pot = data[:,7]
-# Sort stuff
-sort_index = np.argsort(exact_ids)
-exact_ids = exact_ids[sort_index]
-exact_pos = exact_pos[sort_index, :]
-exact_a = exact_a[sort_index, :]        
-exact_pot = exact_pot[sort_index]
-exact_a_norm = np.sqrt(exact_a[:,0]**2 + exact_a[:,1]**2 + exact_a[:,2]**2)
-
-print "Number of particles tested:", np.size(exact_ids)
-    
-# Start the plot
-plt.figure()
-
-count = 0
-
-# Get the Gadget-2 data if existing
-if periodic:
-    gadget2_file_list = glob.glob("forcetest_gadget2_periodic.txt")
-else:
-    gadget2_file_list = glob.glob("forcetest_gadget2.txt")
-if len(gadget2_file_list) != 0:
-
-    gadget2_data = np.loadtxt(gadget2_file_list[0])
-    gadget2_ids = gadget2_data[:,0]
-    gadget2_pos = gadget2_data[:,1:4]
-    gadget2_a_exact = gadget2_data[:,4:7]
-    gadget2_a_grav = gadget2_data[:, 7:10]
-
-    # Sort stuff
-    sort_index = np.argsort(gadget2_ids)
-    gadget2_ids = gadget2_ids[sort_index]
-    gadget2_pos = gadget2_pos[sort_index, :]
-    gadget2_a_exact = gadget2_a_exact[sort_index, :]
-    gadget2_exact_a_norm = np.sqrt(gadget2_a_exact[:,0]**2 + gadget2_a_exact[:,1]**2 + gadget2_a_exact[:,2]**2)
-    gadget2_a_grav = gadget2_a_grav[sort_index, :]
-
-    # Cross-checks
-    if not np.array_equal(exact_ids, gadget2_ids):
-        print "Comparing different IDs !"
-
-    if np.max(np.abs(exact_pos - gadget2_pos)/np.abs(gadget2_pos)) > 1e-6:
-        print "Comparing different positions ! max difference:"
-        index = np.argmax(exact_pos[:,0]**2 + exact_pos[:,1]**2 + exact_pos[:,2]**2 - gadget2_pos[:,0]**2 - gadget2_pos[:,1]**2 - gadget2_pos[:,2]**2)
-        print "Gadget2 (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n"
-
-    diff = np.abs(exact_a_norm - gadget2_exact_a_norm) / np.abs(gadget2_exact_a_norm)
-    max_diff = np.max(diff)
-    if max_diff > 2e-6:
-        print "Comparing different exact accelerations !"
-        print "Median=", np.median(diff), "Mean=", np.mean(diff), "99%=", np.percentile(diff, 99)
-        print "max difference ( relative diff =", max_diff, "):"
-        #index = np.argmax(exact_a[:,0]**2 + exact_a[:,1]**2 + exact_a[:,2]**2 - gadget2_a_exact[:,0]**2 - gadget2_a_exact[:,1]**2 - gadget2_a_exact[:,2]**2)
-        index = np.argmax(diff)
-        print "a_exact --- Gadget2:", gadget2_a_exact[index,:], "exact:", exact_a[index,:]
-        print "pos ---     Gadget2: (id=%d):"%gadget2_ids[index], gadget2_pos[index,:], "exact (id=%d):"%gadget2_ids[index], gadget2_pos[index,:],"\n"
-
-    
-    # Compute the error norm
-    diff = gadget2_a_exact - gadget2_a_grav
-
-    norm_diff = np.sqrt(diff[:,0]**2 + diff[:,1]**2 + diff[:,2]**2)
-    norm_a = np.sqrt(gadget2_a_exact[:,0]**2 + gadget2_a_exact[:,1]**2 + gadget2_a_exact[:,2]**2)
-
-    norm_error = norm_diff / norm_a
-    error_x = abs(diff[:,0]) / norm_a
-    error_y = abs(diff[:,1]) / norm_a
-    error_z = abs(diff[:,2]) / norm_a
-    
-    # Bin the error
-    norm_error_hist,_ = np.histogram(norm_error, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_x_hist,_ = np.histogram(error_x, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_y_hist,_ = np.histogram(error_y, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_z_hist,_ = np.histogram(error_z, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    
-    norm_median = np.median(norm_error)
-    median_x = np.median(error_x)
-    median_y = np.median(error_y)
-    median_z = np.median(error_z)
-
-    norm_per99 = np.percentile(norm_error,99)
-    per99_x = np.percentile(error_x,99)
-    per99_y = np.percentile(error_y,99)
-    per99_z = np.percentile(error_z,99)
-
-    norm_max = np.max(norm_error)
-    max_x = np.max(error_x)
-    max_y = np.max(error_y)
-    max_z = np.max(error_z)
-
-    print "Gadget-2 ---- "
-    print "Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max)
-    print "X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x)
-    print "Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y)
-    print "Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z)
-    print ""
-
-    plt.subplot(231)    
-    plt.text(min_error * 1.5, 1.55, "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(norm_median, norm_per99), ha="left", va="top", alpha=0.8)
-    plt.semilogx(bins, norm_error_hist, 'k--', label="Gadget-2", alpha=0.8)
-    plt.subplot(232)
-    plt.semilogx(bins, error_x_hist, 'k--', label="Gadget-2", alpha=0.8)
-    plt.text(min_error * 1.5, 1.55, "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_x, per99_x), ha="left", va="top", alpha=0.8)
-    plt.subplot(233)    
-    plt.semilogx(bins, error_y_hist, 'k--', label="Gadget-2", alpha=0.8)
-    plt.text(min_error * 1.5, 1.55, "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_y, per99_y), ha="left", va="top", alpha=0.8)
-    plt.subplot(234)    
-    plt.semilogx(bins, error_z_hist, 'k--', label="Gadget-2", alpha=0.8)
-    plt.text(min_error * 1.5, 1.55, "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_z, per99_z), ha="left", va="top", alpha=0.8)
-    
-    count += 1
-
-
-# Plot the different histograms
-for i in range(num_order):
-    data = np.loadtxt(order_list[i])
-    ids = data[:,0]
-    pos = data[:,1:4]
-    a_grav = data[:, 4:7]
-    pot = data[:, 7]
-
-    # Sort stuff
-    sort_index = np.argsort(ids)
-    ids = ids[sort_index]
-    pos = pos[sort_index, :]
-    a_grav = a_grav[sort_index, :]        
-    pot = pot[sort_index]
-
-    # Cross-checks
-    if not np.array_equal(exact_ids, ids):
-        print "Comparing different IDs !"
-
-    if np.max(np.abs(exact_pos - pos)/np.abs(pos)) > 1e-6:
-        print "Comparing different positions ! max difference:"
-        index = np.argmax(exact_pos[:,0]**2 + exact_pos[:,1]**2 + exact_pos[:,2]**2 - pos[:,0]**2 - pos[:,1]**2 - pos[:,2]**2)
-        print "SWIFT (id=%d):"%ids[index], pos[index,:], "exact (id=%d):"%exact_ids[index], exact_pos[index,:], "\n"
-    
-    # Compute the error norm
-    diff = exact_a - a_grav
-    diff_pot = exact_pot - pot
-
-    # Correct for different normalization of potential
-    print "Difference in normalization of potential:", np.mean(diff_pot),
-    print "std_dev=", np.std(diff_pot), "99-percentile:", np.percentile(diff_pot, 99)-np.median(diff_pot), "1-percentile:", np.median(diff_pot) - np.percentile(diff_pot, 1)
-
-    exact_pot -= np.mean(diff_pot)
-    diff_pot = exact_pot - pot
-
-    norm_diff = np.sqrt(diff[:,0]**2 + diff[:,1]**2 + diff[:,2]**2)
-
-    norm_error = norm_diff / exact_a_norm
-    error_x = abs(diff[:,0]) / exact_a_norm
-    error_y = abs(diff[:,1]) / exact_a_norm
-    error_z = abs(diff[:,2]) / exact_a_norm
-    error_pot = abs(diff_pot) / abs(exact_pot)
-    
-    # Bin the error
-    norm_error_hist,_ = np.histogram(norm_error, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_x_hist,_ = np.histogram(error_x, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_y_hist,_ = np.histogram(error_y, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_z_hist,_ = np.histogram(error_z, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-    error_pot_hist,_ = np.histogram(error_pot, bins=bin_edges, density=False) / (np.size(norm_error) * bin_size)
-
-    norm_median = np.median(norm_error)
-    median_x = np.median(error_x)
-    median_y = np.median(error_y)
-    median_z = np.median(error_z)
-    median_pot = np.median(error_pot)
-
-    norm_per99 = np.percentile(norm_error,99)
-    per99_x = np.percentile(error_x,99)
-    per99_y = np.percentile(error_y,99)
-    per99_z = np.percentile(error_z,99)
-    per99_pot = np.percentile(error_pot, 99)
-
-    norm_max = np.max(norm_error)
-    max_x = np.max(error_x)
-    max_y = np.max(error_y)
-    max_z = np.max(error_z)
-    max_pot = np.max(error_pot)
-
-    print "Order %d ---- "%order[i]
-    print "Norm: median= %f 99%%= %f max= %f"%(norm_median, norm_per99, norm_max)
-    print "X   : median= %f 99%%= %f max= %f"%(median_x, per99_x, max_x)
-    print "Y   : median= %f 99%%= %f max= %f"%(median_y, per99_y, max_y)
-    print "Z   : median= %f 99%%= %f max= %f"%(median_z, per99_z, max_z)
-    print "Pot : median= %f 99%%= %f max= %f"%(median_pot, per99_pot, max_pot)
-    print ""
-    
-    plt.subplot(231)    
-    plt.semilogx(bins, error_x_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
-    plt.text(min_error * 1.5, 1.5 - count/10., "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_x, per99_x), ha="left", va="top", color=cols[i])
-    plt.subplot(232)    
-    plt.semilogx(bins, error_y_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
-    plt.text(min_error * 1.5, 1.5 - count/10., "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_y, per99_y), ha="left", va="top", color=cols[i])
-    plt.subplot(233)    
-    plt.semilogx(bins, error_z_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
-    plt.text(min_error * 1.5, 1.5 - count/10., "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_z, per99_z), ha="left", va="top", color=cols[i])
-    plt.subplot(234)
-    plt.semilogx(bins, norm_error_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
-    plt.text(min_error * 1.5, 1.5 - count/10., "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(norm_median, norm_per99), ha="left", va="top", color=cols[i])
-    plt.subplot(235)    
-    plt.semilogx(bins, error_pot_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
-    plt.text(min_error * 1.5, 1.5 - count/10., "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$"%(median_pot, per99_pot), ha="left", va="top", color=cols[i])
-
-    count += 1
-
-plt.subplot(231)    
-plt.xlabel("$\delta a_x/|\overrightarrow{a}_{exact}|$")
-#plt.ylabel("Density")
-plt.xlim(min_error, max_error)
-plt.ylim(0,1.75)
-#plt.legend(loc="center left")
-plt.subplot(232)    
-plt.xlabel("$\delta a_y/|\overrightarrow{a}_{exact}|$")
-#plt.ylabel("Density")
-plt.xlim(min_error, max_error)
-plt.ylim(0,1.75)
-#plt.legend(loc="center left")
-plt.subplot(233)    
-plt.xlabel("$\delta a_z/|\overrightarrow{a}_{exact}|$")
-#plt.ylabel("Density")
-plt.xlim(min_error, max_error)
-plt.ylim(0,1.75)
-plt.subplot(234)
-plt.xlabel("$|\delta \overrightarrow{a}|/|\overrightarrow{a}_{exact}|$")
-#plt.ylabel("Density")
-plt.xlim(min_error, max_error)
-plt.ylim(0,2.5)
-plt.legend(loc="upper left")
-plt.subplot(235)    
-plt.xlabel("$\delta \phi/\phi_{exact}$")
-#plt.ylabel("Density")
-plt.xlim(min_error, max_error)
-plt.ylim(0,1.75)
-#plt.legend(loc="center left")
-
-
-
-plt.savefig("gravity_checks_step%.4d.png"%step, dpi=200)
-plt.savefig("gravity_checks_step%.4d.pdf"%step, dpi=200)
diff --git a/examples/plot_scaling_results.py b/examples/plot_scaling_results.py
deleted file mode 100755
index e39f0d2d0c00eecf7680b2f090bd2c0aa29ed8bb..0000000000000000000000000000000000000000
--- a/examples/plot_scaling_results.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/env python
-#
-# Usage:
-#  python plot_scaling_results.py input-file1-ext input-file2-ext ...
-#
-# Description:
-# Plots speed up, parallel efficiency and time to solution given a "timesteps" output file generated by SWIFT.
-# 
-# Example:
-# python plot_scaling_results.py _hreads_cosma_stdout.txt _threads_knl_stdout.txt
-# 
-# The working directory should contain files 1_threads_cosma_stdout.txt - 64_threads_cosma_stdout.txt and 1_threads_knl_stdout.txt - 64_threads_knl_stdout.txt, i.e wall clock time for each run using a given number of threads
-
-import sys
-import glob
-import re
-import numpy as np
-import matplotlib.pyplot as plt
-import scipy.stats
-import ntpath
-
-params = {'axes.labelsize': 14,
-'axes.titlesize': 18,
-'font.size': 12,
-'legend.fontsize': 12,
-'xtick.labelsize': 14,
-'ytick.labelsize': 14,
-'text.usetex': True,
-'figure.subplot.left'    : 0.055,
-'figure.subplot.right'   : 0.98  ,
-'figure.subplot.bottom'  : 0.05  ,
-'figure.subplot.top'     : 0.95  ,
-'figure.subplot.wspace'  : 0.14  ,
-'figure.subplot.hspace'  : 0.12  ,
-'lines.markersize' : 6,
-'lines.linewidth' : 3.,
-'text.latex.unicode': True
-}
-plt.rcParams.update(params)
-plt.rc('font',**{'family':'sans-serif','sans-serif':['Times']})
-
-version = []
-branch = []
-revision = []
-hydro_scheme = []
-hydro_kernel = []
-hydro_neighbours = []
-hydro_eta = []
-threadList = []
-hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',
-           '#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466',
-           '#4477AA']
-linestyle = (hexcols[0],hexcols[1],hexcols[3],hexcols[5],hexcols[6],hexcols[8],hexcols[2],hexcols[4],hexcols[7],hexcols[9])
-numTimesteps = 0
-legendTitle = ' '
-
-inputFileNames = []
-
-# Work out how many data series there are
-if len(sys.argv) == 1:
-  print "Please specify an input file in the arguments."
-  sys.exit()
-else:
-  for fileName in sys.argv[1:]:
-    inputFileNames.append(fileName)
-  numOfSeries = int(len(sys.argv) - 1)
-
-# Get the names of the branch, Git revision, hydro scheme and hydro kernel
-def parse_header(inputFile):
-  with open(inputFile, 'r') as f:
-    found_end = False
-    for line in f:
-      if 'Branch:' in line:
-        s = line.split()
-        line = s[2:]
-        branch.append(" ".join(line))
-      elif 'Revision:' in line:
-        s = line.split() 
-        revision.append(s[2])
-      elif 'Hydrodynamic scheme:' in line:
-        line = line[2:-1]
-        s = line.split()
-        line = s[2:]
-        hydro_scheme.append(" ".join(line))
-      elif 'Hydrodynamic kernel:' in line:
-        line = line[2:-1]
-        s = line.split()
-        line = s[2:5]
-        hydro_kernel.append(" ".join(line))
-      elif 'neighbours:' in line:
-        s = line.split() 
-        hydro_neighbours.append(s[4])
-      elif 'Eta:' in line:
-        s = line.split() 
-        hydro_eta.append(s[2])
-  return
-
-# Parse file and return total time taken, speed up and parallel efficiency
-def parse_files():
-  
-  totalTime = []
-  sumTotal = []
-  speedUp = []
-  parallelEff = []
- 
-  for i in range(0,numOfSeries): # Loop over each data series
-
-    # Get path to set of files
-    path, name = ntpath.split(inputFileNames[i])
-
-    # Get each file that starts with the cmd line arg
-    file_list = glob.glob(inputFileNames[i] + "*")
-   
-    threadList.append([])
-
-    # Remove path from file names 
-    for j in range(0,len(file_list)):
-      p, filename = ntpath.split(file_list[j])
-      file_list[j] = filename
-
-    # Create a list of threads using the list of files
-    for fileName in file_list:
-      s = re.split(r'[_.]+',fileName)
-      threadList[i].append(int(s[1]))
-  
-    # Re-add path once each file has been found
-    if len(path) != 0:
-      for j in range(0,len(file_list)):
-        file_list[j] = path + '/' + file_list[j]
-
-    # Sort the thread list in ascending order and save the indices
-    sorted_indices = np.argsort(threadList[i])
-    threadList[i].sort()
-
-    # Sort the file list in ascending order acording to the thread number
-    file_list = [ file_list[j] for j in sorted_indices]
-
-    parse_header(file_list[0])
-
-    branch[i] = branch[i].replace("_", "\\_") 
-   
-    #version.append("$\\textrm{%s}$"%str(branch[i]))# + " " + revision[i])# + "\n" + hydro_scheme[i] + 
-#                   "\n" + hydro_kernel[i] + r", $N_{ngb}=%d$"%float(hydro_neighbours[i]) + 
-#                   r", $\eta=%.3f$"%float(hydro_eta[i]))
-    totalTime.append([])
-    speedUp.append([])
-    parallelEff.append([])
-    
-    # Loop over all files for a given series and load the times
-    for j in range(0,len(file_list)):
-      times = np.loadtxt(file_list[j],usecols=(9,))
-      updates = np.loadtxt(file_list[j],usecols=(6,))
-      totalTime[i].append(np.sum(times))
-      
-    sumTotal.append(np.sum(totalTime[i]))
-
-  # Sort the total times in descending order
-  sorted_indices = np.argsort(sumTotal)[::-1]
-  
-  totalTime = [ totalTime[j] for j in sorted_indices]
-  branchNew = [ branch[j] for j in sorted_indices]
-  
-  for i in range(0,numOfSeries):
-    version.append("$\\textrm{%s}$"%str(branchNew[i]))
-
-  global numTimesteps
-  numTimesteps = len(times)
-
-  # Find speed-up and parallel efficiency
-  for i in range(0,numOfSeries):
-    for j in range(0,len(file_list)):
-      speedUp[i].append(totalTime[i][0] / totalTime[i][j])
-      parallelEff[i].append(speedUp[i][j] / threadList[i][j])
-
-  return (totalTime,speedUp,parallelEff)
-
-def print_results(totalTime,parallelEff,version):
- 
-  for i in range(0,numOfSeries):
-    print " "
-    print "------------------------------------"
-    print version[i]
-    print "------------------------------------"
-    print "Wall clock time for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
-    
-    for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(totalTime[i][j])
-    
-    print " "
-    print "------------------------------------"
-    print "Parallel Efficiency for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
-    
-    for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j])
-
-  return
-
-# Returns a lighter/darker version of the colour
-def color_variant(hex_color, brightness_offset=1):
-  
-  rgb_hex = [hex_color[x:x+2] for x in [1, 3, 5]]
-  new_rgb_int = [int(hex_value, 16) + brightness_offset for hex_value in rgb_hex]
-  new_rgb_int = [min([255, max([0, i])]) for i in new_rgb_int] # make sure new values are between 0 and 255
-  # hex() produces "0x88", we want just "88"
-  
-  return "#" + "".join([hex(i)[2:] for i in new_rgb_int])
-
-def plot_results(totalTime,speedUp,parallelEff,numSeries):
-
-  fig, axarr = plt.subplots(2, 2, figsize=(10,10), frameon=True)
-  speedUpPlot = axarr[0, 0]
-  parallelEffPlot = axarr[0, 1]
-  totalTimePlot = axarr[1, 0]
-  emptyPlot = axarr[1, 1]
-  
-  # Plot speed up
-  speedUpPlot.plot(threadList[0],threadList[0], linestyle='--', lw=1.5, color='0.2')
-  for i in range(0,numSeries):
-    speedUpPlot.plot(threadList[0],speedUp[i],linestyle[i],label=version[i])
-
-  speedUpPlot.set_ylabel("${\\rm Speed\\textendash up}$", labelpad=0.)
-  speedUpPlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  speedUpPlot.set_xlim([0.7,threadList[0][-1]+1])
-  speedUpPlot.set_ylim([0.7,threadList[0][-1]+1])
-
-  # Plot parallel efficiency
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [1,1], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.9,0.9], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.75,0.75], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.5,0.5], 'k--', lw=1.5, color='0.2')
-  for i in range(0,numSeries):
-    parallelEffPlot.plot(threadList[0],parallelEff[i],linestyle[i])
-
-  parallelEffPlot.set_xscale('log')
-  parallelEffPlot.set_ylabel("${\\rm Parallel~efficiency}$", labelpad=0.)
-  parallelEffPlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  parallelEffPlot.set_ylim([0,1.1])
-  parallelEffPlot.set_xlim([0.9,10**(np.floor(np.log10(threadList[0][-1]))+0.5)])
-
-  # Plot time to solution     
-  for i in range(0,numOfSeries):
-    pts = [1, 10**np.floor(np.log10(threadList[i][-1])+1)]
-    totalTimePlot.loglog(pts,totalTime[i][0]/pts, 'k--', lw=1., color='0.2')
-    totalTimePlot.loglog(threadList[i],totalTime[i],linestyle[i],label=version[i])
-
-  y_min = 10**np.floor(np.log10(np.min(totalTime[:][0])*0.6))
-  y_max = 1.0*10**np.floor(np.log10(np.max(totalTime[:][0]) * 1.5)+1)
-  totalTimePlot.set_xscale('log')
-  totalTimePlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  totalTimePlot.set_ylabel("${\\rm Time~to~solution}~[{\\rm ms}]$", labelpad=0.)
-  totalTimePlot.set_xlim([0.9, 10**(np.floor(np.log10(threadList[0][-1]))+0.5)])
-  totalTimePlot.set_ylim(y_min, y_max)
-
-  totalTimePlot.legend(bbox_to_anchor=(1.21, 0.97), loc=2, borderaxespad=0.,prop={'size':12}, frameon=False,title=legendTitle)
-  emptyPlot.axis('off')
-  
-  for i, txt in enumerate(threadList[0]):
-    if 2**np.floor(np.log2(threadList[0][i])) == threadList[0][i]: # only powers of 2
-      speedUpPlot.annotate("$%s$"%txt, (threadList[0][i],speedUp[0][i]), (threadList[0][i],speedUp[0][i] + 0.3), color=hexcols[0])
-      parallelEffPlot.annotate("$%s$"%txt, (threadList[0][i],parallelEff[0][i]), (threadList[0][i], parallelEff[0][i]+0.02), color=hexcols[0])
-      totalTimePlot.annotate("$%s$"%txt, (threadList[0][i],totalTime[0][i]), (threadList[0][i], totalTime[0][i]*1.1), color=hexcols[0])
-
-  #fig.suptitle("Thread Speed Up, Parallel Efficiency and Time To Solution for {} Time Steps of Cosmo Volume\n Cmd Line: {}, Platform: {}".format(numTimesteps),cmdLine,platform))
-  fig.suptitle("${\\rm Speed\\textendash up,~parallel~efficiency~and~time~to~solution~for}~%d~{\\rm time\\textendash steps}$"%numTimesteps, fontsize=16)
-
-  return
-
-# Calculate results
-(totalTime,speedUp,parallelEff) = parse_files()
-
-legendTitle = version[0]
-
-plot_results(totalTime,speedUp,parallelEff,numOfSeries)
-
-print_results(totalTime,parallelEff,version)
-
-# And plot
-plt.show()
diff --git a/examples/plot_scaling_results_breakdown.py b/examples/plot_scaling_results_breakdown.py
deleted file mode 100755
index 6a87e42bcd393d543187e768e31a15bc56f1ae6a..0000000000000000000000000000000000000000
--- a/examples/plot_scaling_results_breakdown.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/env python
-#
-# Usage:
-#  python plot_scaling_results.py input-file1-ext input-file2-ext ...
-#
-# Description:
-# Plots speed up, parallel efficiency and time to solution given a "timesteps" output file generated by SWIFT.
-# 
-# Example:
-# python plot_scaling_results.py _hreads_cosma_stdout.txt _threads_knl_stdout.txt
-# 
-# The working directory should contain files 1_threads_cosma_stdout.txt - 64_threads_cosma_stdout.txt and 1_threads_knl_stdout.txt - 64_threads_knl_stdout.txt, i.e wall clock time for each run using a given number of threads
-
-import sys
-import glob
-import re
-import numpy as np
-import matplotlib.pyplot as plt
-import scipy.stats
-import ntpath
-
-params = {'axes.labelsize': 14,
-'axes.titlesize': 18,
-'font.size': 12,
-'legend.fontsize': 12,
-'xtick.labelsize': 14,
-'ytick.labelsize': 14,
-'text.usetex': True,
-'figure.subplot.left'    : 0.055,
-'figure.subplot.right'   : 0.98  ,
-'figure.subplot.bottom'  : 0.05  ,
-'figure.subplot.top'     : 0.95  ,
-'figure.subplot.wspace'  : 0.14  ,
-'figure.subplot.hspace'  : 0.12  ,
-'lines.markersize' : 6,
-'lines.linewidth' : 3.,
-'text.latex.unicode': True
-}
-plt.rcParams.update(params)
-plt.rc('font',**{'family':'sans-serif','sans-serif':['Times']})
-
-version = []
-branch = []
-revision = []
-hydro_scheme = []
-hydro_kernel = []
-hydro_neighbours = []
-hydro_eta = []
-threadList = []
-hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',
-           '#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466',
-           '#4477AA']
-linestyle = (hexcols[0],hexcols[1],hexcols[3],hexcols[5],hexcols[6],hexcols[8],hexcols[2],hexcols[4],hexcols[7],hexcols[9])
-numTimesteps = 0
-legendTitle = ' '
-
-inputFileNames = []
-
-# Work out how many data series there are
-if len(sys.argv) == 1:
-  print "Please specify an input file in the arguments."
-  sys.exit()
-else:
-  for fileName in sys.argv[1:]:
-    inputFileNames.append(fileName)
-  numOfSeries = int(len(sys.argv) - 1)
-
-# Get the names of the branch, Git revision, hydro scheme and hydro kernel
-def parse_header(inputFile):
-  with open(inputFile, 'r') as f:
-    found_end = False
-    for line in f:
-      if 'Branch:' in line:
-        s = line.split()
-        line = s[2:]
-        branch.append(" ".join(line))
-      elif 'Revision:' in line:
-        s = line.split() 
-        revision.append(s[2])
-      elif 'Hydrodynamic scheme:' in line:
-        line = line[2:-1]
-        s = line.split()
-        line = s[2:]
-        hydro_scheme.append(" ".join(line))
-      elif 'Hydrodynamic kernel:' in line:
-        line = line[2:-1]
-        s = line.split()
-        line = s[2:5]
-        hydro_kernel.append(" ".join(line))
-      elif 'neighbours:' in line:
-        s = line.split() 
-        hydro_neighbours.append(s[4])
-      elif 'Eta:' in line:
-        s = line.split() 
-        hydro_eta.append(s[2])
-  return
-
-# Parse file and return total time taken, speed up and parallel efficiency
-def parse_files():
-  
-  totalTime = []
-  sumTotal = []
-  speedUp = []
-  parallelEff = []
- 
-  for i in range(0,numOfSeries): # Loop over each data series
-
-    # Get path to set of files
-    path, name = ntpath.split(inputFileNames[i])
-
-    # Get each file that starts with the cmd line arg
-    file_list = glob.glob(inputFileNames[i] + "*")
-   
-    threadList.append([])
-
-    # Remove path from file names 
-    for j in range(0,len(file_list)):
-      p, filename = ntpath.split(file_list[j])
-      file_list[j] = filename
-
-    # Create a list of threads using the list of files
-    for fileName in file_list:
-      s = re.split(r'[_.]+',fileName)
-      threadList[i].append(int(s[1]))
-  
-    # Re-add path once each file has been found
-    if len(path) != 0:
-      for j in range(0,len(file_list)):
-        file_list[j] = path + '/' + file_list[j]
-
-    # Sort the thread list in ascending order and save the indices
-    sorted_indices = np.argsort(threadList[i])
-    threadList[i].sort()
-
-    # Sort the file list in ascending order acording to the thread number
-    file_list = [ file_list[j] for j in sorted_indices]
-
-    parse_header(file_list[0])
-
-    branch[i] = branch[i].replace("_", "\\_") 
-   
-    
-    #version.append("$\\textrm{%s}$"%str(branch[i]))# + " " + revision[i])# + "\n" + hydro_scheme[i] + 
-#                   "\n" + hydro_kernel[i] + r", $N_{ngb}=%d$"%float(hydro_neighbours[i]) + 
-#                   r", $\eta=%.3f$"%float(hydro_eta[i]))
-    totalTime.append([])
-    speedUp.append([])
-    parallelEff.append([])
-   
-    # Loop over all files for a given series and load the times
-    for j in range(0,len(file_list)):
-      times = np.loadtxt(file_list[j],usecols=(9,))
-      updates = np.loadtxt(file_list[j],usecols=(6,))
-      totalTime[i].append(np.sum(times))
-      
-    sumTotal.append(np.sum(totalTime[i]))
-
-  # Sort the total times in descending order
-  sorted_indices = np.argsort(sumTotal)[::-1]
-  
-  totalTime = [ totalTime[j] for j in sorted_indices]
-  branchNew = [ branch[j] for j in sorted_indices]
-  
-  for i in range(0,numOfSeries):
-    version.append("$\\textrm{%s}$"%str(branchNew[i]))
-
-  global numTimesteps
-  numTimesteps = len(times)
-
-  # Find speed-up and parallel efficiency
-  for i in range(0,numOfSeries):
-    for j in range(0,len(file_list)):
-      speedUp[i].append(totalTime[i][0] / totalTime[i][j])
-      parallelEff[i].append(speedUp[i][j] / threadList[i][j])
-
-  return (totalTime,speedUp,parallelEff)
-
-def print_results(totalTime,parallelEff,version):
- 
-  for i in range(0,numOfSeries):
-    print " "
-    print "------------------------------------"
-    print version[i]
-    print "------------------------------------"
-    print "Wall clock time for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
-    
-    for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(totalTime[i][j])
-    
-    print " "
-    print "------------------------------------"
-    print "Parallel Efficiency for: {} time steps".format(numTimesteps)
-    print "------------------------------------"
-    
-    for j in range(0,len(threadList[i])):
-      print str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j])
-
-  return
-
-# Returns a lighter/darker version of the colour
-def color_variant(hex_color, brightness_offset=1):
-  
-  rgb_hex = [hex_color[x:x+2] for x in [1, 3, 5]]
-  new_rgb_int = [int(hex_value, 16) + brightness_offset for hex_value in rgb_hex]
-  new_rgb_int = [min([255, max([0, i])]) for i in new_rgb_int] # make sure new values are between 0 and 255
-  # hex() produces "0x88", we want just "88"
-  
-  return "#" + "".join([hex(i)[2:] for i in new_rgb_int])
-
-def plot_results(totalTime,speedUp,parallelEff,numSeries):
-
-  fig, axarr = plt.subplots(2, 2, figsize=(10,10), frameon=True)
-  speedUpPlot = axarr[0, 0]
-  parallelEffPlot = axarr[0, 1]
-  totalTimePlot = axarr[1, 0]
-  emptyPlot = axarr[1, 1]
-  
-  # Plot speed up
-  speedUpPlot.plot(threadList[0],threadList[0], linestyle='--', lw=1.5, color='0.2')
-  for i in range(0,numSeries):
-    speedUpPlot.plot(threadList[0],speedUp[i],linestyle[i],label=version[i])
-
-  speedUpPlot.set_ylabel("${\\rm Speed\\textendash up}$", labelpad=0.)
-  speedUpPlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  speedUpPlot.set_xlim([0.7,threadList[0][-1]+1])
-  speedUpPlot.set_ylim([0.7,threadList[0][-1]+1])
-
-  # Plot parallel efficiency
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [1,1], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.9,0.9], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.75,0.75], 'k--', lw=1.5, color='0.2')
-  parallelEffPlot.plot([threadList[0][0], 10**np.floor(np.log10(threadList[0][-1])+1)], [0.5,0.5], 'k--', lw=1.5, color='0.2')
-  for i in range(0,numSeries):
-    parallelEffPlot.plot(threadList[0],parallelEff[i],linestyle[i])
-
-  parallelEffPlot.set_xscale('log')
-  parallelEffPlot.set_ylabel("${\\rm Parallel~efficiency}$", labelpad=0.)
-  parallelEffPlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  parallelEffPlot.set_ylim([0,1.1])
-  parallelEffPlot.set_xlim([0.9,10**(np.floor(np.log10(threadList[0][-1]))+0.5)])
-
-  # Plot time to solution     
-  for i in range(0,numSeries):
-    for j in range(0,len(threadList[0])):
-      totalTime[i][j] = totalTime[i][j] * threadList[i][j]
-      if i > 1:
-        totalTime[i][j] = totalTime[i][j] + totalTime[i-1][j]
-    totalTimePlot.plot(threadList[0],totalTime[i],linestyle[i],label=version[i])
-
-    if i > 1:
-      colour = color_variant(linestyle[i],100)
-      totalTimePlot.fill_between(threadList[0],np.array(totalTime[i]),np.array(totalTime[i-1]), facecolor=colour)
-    elif i==1:
-      colour = color_variant(linestyle[i],100)
-      totalTimePlot.fill_between(threadList[0], totalTime[i],facecolor=colour)
-
-  totalTimePlot.set_xscale('log')
-  totalTimePlot.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
-  totalTimePlot.set_xlabel("${\\rm Threads}$", labelpad=0.)
-  totalTimePlot.set_ylabel("${\\rm Time~to~solution~x~No.~of~cores}~[{\\rm ms}]$", labelpad=0.)
-  totalTimePlot.set_xlim([0.9, 10**(np.floor(np.log10(threadList[0][-1]))+0.5)])
-  #totalTimePlot.set_ylim(y_min, y_max)
-
-  totalTimePlot.legend(bbox_to_anchor=(1.21, 0.97), loc=2, borderaxespad=0.,prop={'size':12}, frameon=False,title=legendTitle)
-  emptyPlot.axis('off')
-  
-  for i, txt in enumerate(threadList[0]):
-    if 2**np.floor(np.log2(threadList[0][i])) == threadList[0][i]: # only powers of 2
-      speedUpPlot.annotate("$%s$"%txt, (threadList[0][i],speedUp[0][i]), (threadList[0][i],speedUp[0][i] + 0.3), color=hexcols[0])
-      parallelEffPlot.annotate("$%s$"%txt, (threadList[0][i],parallelEff[0][i]), (threadList[0][i], parallelEff[0][i]+0.02), color=hexcols[0])
-      totalTimePlot.annotate("$%s$"%txt, (threadList[0][i],totalTime[0][i]), (threadList[0][i], totalTime[0][i]*1.1), color=hexcols[0])
-
-  #fig.suptitle("Thread Speed Up, Parallel Efficiency and Time To Solution for {} Time Steps of Cosmo Volume\n Cmd Line: {}, Platform: {}".format(numTimesteps),cmdLine,platform))
-  fig.suptitle("${\\rm Speed\\textendash up,~parallel~efficiency~and~time~to~solution~x~no.~of~cores~for}~%d~{\\rm time\\textendash steps}$"%numTimesteps, fontsize=16)
-
-  return
-
-# Calculate results
-(totalTime,speedUp,parallelEff) = parse_files()
-
-legendTitle = version[0]
-
-plot_results(totalTime,speedUp,parallelEff,numOfSeries)
-
-print_results(totalTime,parallelEff,version)
-
-# And plot
-plt.show()
diff --git a/examples/plot_task_dependencies.sh b/examples/plot_task_dependencies.sh
deleted file mode 100755
index 77784d8a9cdd3720621c9ad35c4cfbdaf0167ff1..0000000000000000000000000000000000000000
--- a/examples/plot_task_dependencies.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-#  Creates a graphic from the task graph file dependency_graph.dot.
-#  Requires the graphviz command "dot".
-
-if [ ! -e dependency_graph.dot ]; then
-    echo "Missing task-graph output 'dependency_graph.dot'! Cannot generate figure."
-else 
-    dot -Tpng dependency_graph.dot -o task_graph.png
-    echo "Output written to task_graph.png"
-fi
-
-exit
diff --git a/m4/ax_asm_arm_cntvct.m4 b/m4/ax_asm_arm_cntvct.m4
new file mode 100644
index 0000000000000000000000000000000000000000..9a9c7d799d46ab1654a804dc3ee09ce5b616df2c
--- /dev/null
+++ b/m4/ax_asm_arm_cntvct.m4
@@ -0,0 +1,43 @@
+#
+# SYNOPSIS
+#
+#   AX_ASM_ARM_CNTVCT
+#
+# DESCRIPTION
+#
+#   Check whether the CNTVCT_EL0 exists on this platform. Defines
+#   HAVE_ARMV8_CNTVCT_EL0 if true.
+#
+# LICENSE
+#
+#   Copyright (c) 2019 Matthieu Schaller <schaller@strw.leidenuniv.nl>
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved. This file is offered as-is, without any
+#   warranty.
+
+#serial 1
+
+AC_DEFUN([AX_ASM_ARM_CNTVCT],
+[AC_CACHE_CHECK([for CNTVCT_EL0 asm instruction on ARM v8.1a],
+   [ax_cv_asm_arm_cntvct_works],
+    [AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdint.h>
+
+int
+main()
+{
+   uint64_t cc = 0;
+   __asm__ __volatile__("mrs %0,  CNTVCT_EL0" : "=r"(cc));
+   return 0;
+}
+    ]])],
+    [ax_cv_asm_arm_cntvct_works=yes],
+    [ax_cv_asm_arm_cntvct_works=no],
+    [ax_cv_asm_arm_cntvct_works=no])])
+if test "$ax_cv_asm_arm_cntvct_works" = "yes" ; then
+  AC_DEFINE([HAVE_ARMV8_CNTVCT_EL0], [1],
+    [Define to 1 if the ARM v8.1a instruction CNTVCT_EL0 exists.])
+fi
+])
diff --git a/m4/ax_asm_arm_pmccntr.m4 b/m4/ax_asm_arm_pmccntr.m4
new file mode 100644
index 0000000000000000000000000000000000000000..ded3bbbc04a5270acb8045d8375ccd2e5986ecd2
--- /dev/null
+++ b/m4/ax_asm_arm_pmccntr.m4
@@ -0,0 +1,43 @@
+#
+# SYNOPSIS
+#
+#   AX_ASM_ARM_PMCCNTR
+#
+# DESCRIPTION
+#
+#   Check whether the PMCCNTR_EL0 exists on this platform. Defines
+#   HAVE_ARMV8_PMCCNTR_EL0 if true.
+#
+# LICENSE
+#
+#   Copyright (c) 2019 Matthieu Schaller <schaller@strw.leidenuniv.nl>
+#
+#   Copying and distribution of this file, with or without modification, are
+#   permitted in any medium without royalty provided the copyright notice
+#   and this notice are preserved. This file is offered as-is, without any
+#   warranty.
+
+#serial 1
+
+AC_DEFUN([AX_ASM_ARM_PMCCNTR],
+[AC_CACHE_CHECK([for PMCCNTR_EL0 asm instruction on ARM v8.1a],
+   [ax_cv_asm_arm_pmccntr_works],
+    [AC_RUN_IFELSE([AC_LANG_SOURCE([[
+#include <stdint.h>
+
+int
+main()
+{
+   uint64_t cc = 0;
+   __asm__ __volatile__("mrs %0, PMCCNTR_EL0" : "=r"(cc));
+   return 0;
+}
+    ]])],
+    [ax_cv_asm_arm_pmccntr_works=yes],
+    [ax_cv_asm_arm_pmccntr_works=no],
+    [ax_cv_asm_arm_pmccntr_works=no])])
+if test "$ax_cv_asm_arm_pmccntr_works" = "yes" ; then
+  AC_DEFINE([HAVE_ARMV8_PMCCNTR_EL0], [1],
+    [Define to 1 if the ARM v8.1a instruction PMCCNTR_EL0 exists.])
+fi
+])
diff --git a/m4/ax_ext.m4 b/m4/ax_ext.m4
index 8da8c61decc9aa35737fb977def82d51ade5ef0c..0db44374f35e4605d6ca3da14ba22de0663cb87a 100644
--- a/m4/ax_ext.m4
+++ b/m4/ax_ext.m4
@@ -1,5 +1,5 @@
 # ===========================================================================
-#          http://www.gnu.org/software/autoconf-archive/ax_ext.html
+#          https://www.gnu.org/software/autoconf-archive/ax_ext.html
 # ===========================================================================
 #
 # SYNOPSIS
@@ -31,12 +31,13 @@
 #     HAVE_SHA / HAVE_AES / HAVE_AVX / HAVE_FMA3 / HAVE_FMA4 / HAVE_XOP
 #     HAVE_AVX2 / HAVE_AVX512_F / HAVE_AVX512_CD / HAVE_AVX512_PF
 #     HAVE_AVX512_ER / HAVE_AVX512_VL / HAVE_AVX512_BW / HAVE_AVX512_DQ
-#     HAVE_AVX512_IFMA / HAVE_AVX512_VBMI
+#     HAVE_AVX512_IFMA / HAVE_AVX512_VBMI / HAVE_ALTIVEC / HAVE_VSX
 #
 # LICENSE
 #
 #   Copyright (c) 2007 Christophe Tournayre <turn3r@users.sourceforge.net>
 #   Copyright (c) 2013,2015 Michael Petch <mpetch@capp-sysware.com>
+#   Copyright (c) 2017 Rafael de Lucena Valle <rafaeldelucena@gmail.com>
 #
 #   Copying and distribution of this file, with or without modification, are
 #   permitted in any medium without royalty provided the copyright notice
@@ -47,7 +48,7 @@
 #   the order of the flags when more than one is used. Given that we just
 #   set SIMD_FLAGS to the most specific value, rather than all accepted ones.
 
-#serial 15
+#serial 18
 
 AC_DEFUN([AX_EXT],
 [
@@ -59,19 +60,43 @@ AC_DEFUN([AX_EXT],
 
   case $host_cpu in
     powerpc*)
-      AC_CACHE_CHECK([whether altivec is supported], [ax_cv_have_altivec_ext],
+      AC_CACHE_CHECK([whether altivec is supported for old distros], [ax_cv_have_altivec_old_ext],
           [
             if test `/usr/sbin/sysctl -a 2>/dev/null| grep -c hw.optional.altivec` != 0; then
                 if test `/usr/sbin/sysctl -n hw.optional.altivec` = 1; then
-                  ax_cv_have_altivec_ext=yes
+                  ax_cv_have_altivec_old_ext=yes
                 fi
             fi
           ])
 
-          if test "$ax_cv_have_altivec_ext" = yes; then
+          if test "$ax_cv_have_altivec_old_ext" = yes; then
             AC_DEFINE(HAVE_ALTIVEC,,[Support Altivec instructions])
             AX_CHECK_COMPILE_FLAG(-faltivec, SIMD_FLAGS="$SIMD_FLAGS -faltivec", [])
           fi
+
+      AC_CACHE_CHECK([whether altivec is supported], [ax_cv_have_altivec_ext],
+          [
+            if test `LD_SHOW_AUXV=1 /bin/true 2>/dev/null|grep -c altivec` != 0; then
+              ax_cv_have_altivec_ext=yes
+            fi
+          ])
+
+          if test "$ax_cv_have_altivec_ext" = yes; then
+            AC_DEFINE(HAVE_ALTIVEC,,[Support Altivec instructions])
+            AX_CHECK_COMPILE_FLAG(-maltivec, SIMD_FLAGS="$SIMD_FLAGS -maltivec", [])
+          fi
+
+      AC_CACHE_CHECK([whether vsx is supported], [ax_cv_have_vsx_ext],
+          [
+            if test `LD_SHOW_AUXV=1 /bin/true 2>/dev/null|grep -c vsx` != 0; then
+                ax_cv_have_vsx_ext=yes
+            fi
+          ])
+
+          if test "$ax_cv_have_vsx_ext" = yes; then
+            AC_DEFINE(HAVE_VSX,,[Support VSX instructions])
+            AX_CHECK_COMPILE_FLAG(-mvsx, SIMD_FLAGS="$SIMD_FLAGS -mvsx", [])
+          fi
     ;;
 
     i[[3456]]86*|x86_64*|amd64*)
@@ -139,7 +164,7 @@ AC_DEFUN([AX_EXT],
         ax_cv_have_sse_os_support_ext=no,
         if test "$((0x$edx_cpuid1>>25&0x01))" = 1; then
           AC_LANG_PUSH([C])
-          AC_TRY_RUN([
+          AC_RUN_IFELSE([AC_LANG_SOURCE([[
 #include <signal.h>
 #include <stdlib.h>
             /* No way at ring1 to ring3 in protected mode to check the CR0 and CR4
@@ -151,10 +176,10 @@ AC_DEFUN([AX_EXT],
               /* SSE instruction xorps  %xmm0,%xmm0 */
               __asm__ __volatile__ (".byte 0x0f, 0x57, 0xc0");
               return 0;
-            }],
-            ax_cv_have_sse_os_support_ext=yes,
-            ax_cv_have_sse_os_support_ext=no,
-            ax_cv_have_sse_os_support_ext=no)
+            }]])],
+            [ax_cv_have_sse_os_support_ext=yes],
+            [ax_cv_have_sse_os_support_ext=no],
+            [ax_cv_have_sse_os_support_ext=no])
           AC_LANG_POP([C])
         fi
       ])
diff --git a/m4/ax_func_posix_memalign.m4 b/m4/ax_func_posix_memalign.m4
index bd60adcbc81a5ce5c9e68f71081e6872e5139b0a..2442ceca74c3e40ceaffb2859336527964b22b52 100644
--- a/m4/ax_func_posix_memalign.m4
+++ b/m4/ax_func_posix_memalign.m4
@@ -1,5 +1,5 @@
 # ===========================================================================
-#  http://www.gnu.org/software/autoconf-archive/ax_func_posix_memalign.html
+#  https://www.gnu.org/software/autoconf-archive/ax_func_posix_memalign.html
 # ===========================================================================
 #
 # SYNOPSIS
@@ -22,12 +22,12 @@
 #   and this notice are preserved. This file is offered as-is, without any
 #   warranty.
 
-#serial 7
+#serial 9
 
 AC_DEFUN([AX_FUNC_POSIX_MEMALIGN],
 [AC_CACHE_CHECK([for working posix_memalign],
   [ax_cv_func_posix_memalign_works],
-  [AC_TRY_RUN([
+  [AC_RUN_IFELSE([AC_LANG_SOURCE([[
 #include <stdlib.h>
 
 int
@@ -39,7 +39,7 @@ main ()
    * the size word. */
   exit (posix_memalign (&buffer, sizeof(void *), 123) != 0);
 }
-    ],
+    ]])],
     [ax_cv_func_posix_memalign_works=yes],
     [ax_cv_func_posix_memalign_works=no],
     [ax_cv_func_posix_memalign_works=no])])
diff --git a/m4/ax_gcc_archflag.m4 b/m4/ax_gcc_archflag.m4
index b91c9e8f4003ce7ee70a3f587b89df754f7302d5..ec600016ad3bd7a6afea4ab36c434a91172de9af 100644
--- a/m4/ax_gcc_archflag.m4
+++ b/m4/ax_gcc_archflag.m4
@@ -65,7 +65,7 @@
 #   modified version of the Autoconf Macro, you may extend this special
 #   exception to the GPL to apply to your modified version as well.
 
-#serial 21 (modified for SWIFT)
+#serial 22 (modified for SWIFT)
 
 AC_DEFUN([AX_GCC_ARCHFLAG],
 [AC_REQUIRE([AC_PROG_CC])
@@ -109,7 +109,7 @@ case $host_cpu in
 	    *3?6[[ae]]?:*:*:*) ax_gcc_arch="ivybridge core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
 	    *3?6[[cf]]?:*:*:*|*4?6[[56]]?:*:*:*) ax_gcc_arch="haswell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
 	    *3?6d?:*:*:*|*4?6[[7f]]?:*:*:*|*5?66?:*:*:*) ax_gcc_arch="broadwell core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
-	    *4?6[[de]]?:*:*:*) ax_gcc_arch="skylake haswell sandybridge core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;	
+	    *4?6[[de]]?:*:*:*|*5?6[[de]]?:*:*:*) ax_gcc_arch="skylake haswell sandybridge core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;	
 	    *5?6[[56]]?:*:*:*) ax_gcc_arch="skylake-avx512 skylake haswell sandybridge core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;	
             *8?6[[de]]?:*:*:*|*9?6[[de]]?:*:*:*) ax_gcc_arch="kabylake skylake broadwell haswell sandybridge core-avx2 core-avx-i corei7-avx corei7 core2 pentium-m pentium3 pentiumpro" ;;
 	    *1?6c?:*:*:*|*2?6[[67]]?:*:*:*|*3?6[[56]]?:*:*:*) ax_gcc_arch="bonnell atom core2 pentium-m pentium3 pentiumpro" ;;
@@ -201,6 +201,10 @@ case $host_cpu in
        *POWER4*|*power4*|*gq*) ax_gcc_arch="power4 970";;
        *POWER5*|*power5*|*gr*|*gs*) ax_gcc_arch="power5 power4 970";;
        603ev|8240) ax_gcc_arch="$cputype 603e 603";;
+       *POWER7*) ax_gcc_arch="power7";;
+       *POWER8*) ax_gcc_arch="power8";;
+       *POWER9*) ax_gcc_arch="power9";;
+       *POWER10*) ax_gcc_arch="power10";;
        *) ax_gcc_arch=$cputype ;;
      esac
      ax_gcc_arch="$ax_gcc_arch powerpc"
@@ -212,15 +216,15 @@ case $host_cpu in
      case $cpuimpl in
        0x42) case $cpuarch in
                8) case $cpuvar in
-                    0x0) ax_gcc_arch="thunderx2t99 vulcan armv8.1-a armv8-a+lse armv8-a native" ;;
+                    0x0) ax_gcc_arch="native" ;;
                   esac
                   ;;
              esac
              ;;
        0x43) case $cpuarch in
                8) case $cpuvar in
-                    0x0) ax_gcc_arch="thunderx armv8-a native" ;;
-                    0x1) ax_gcc_arch="thunderx+lse armv8.1-a armv8-a+lse armv8-a native" ;;
+                    0x0) ax_gcc_arch="native" ;;
+                    0x1) ax_gcc_arch="native" ;;
                   esac
                   ;;
              esac
diff --git a/src/Makefile.am b/src/Makefile.am
index 49f95c030faba90365256a799d3c39350bc6f8d8..d7e4249a7ff67132505e3a7df8a134d4cd8b266c 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -16,7 +16,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Add the non-standard paths to the included library headers
-AM_CFLAGS = $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS)
+AM_CFLAGS = $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS) $(GRACKLE_INCS)
 
 # Assign a "safe" version number
 AM_LDFLAGS = $(HDF5_LDFLAGS) $(FFTW_LIBS) -version-info 0:0:0
@@ -25,11 +25,11 @@ AM_LDFLAGS = $(HDF5_LDFLAGS) $(FFTW_LIBS) -version-info 0:0:0
 GIT_CMD = @GIT_CMD@
 
 # Additional dependencies for shared libraries.
-EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIB) $(GSL_LIBS)
+EXTRA_LIBS = $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS)
 
 # MPI libraries.
-MPI_LIBS = $(METIS_LIBS) $(MPI_THREAD_LIBS)
-MPI_FLAGS = -DWITH_MPI $(METIS_INCS)
+MPI_LIBS = $(PARMETIS_LIBS) $(METIS_LIBS) $(MPI_THREAD_LIBS)
+MPI_FLAGS = -DWITH_MPI $(PARMETIS_INCS) $(METIS_INCS)
 
 # Build the libswiftsim library
 lib_LTLIBRARIES = libswiftsim.la
@@ -41,40 +41,49 @@ endif
 # List required headers
 include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \
     engine.h swift.h serial_io.h timers.h debug.h scheduler.h proxy.h parallel_io.h \
-    common_io.h single_io.h multipole.h map.h tools.h partition.h clocks.h parser.h \
-    physical_constants.h physical_constants_cgs.h potential.h version.h \
+    common_io.h single_io.h multipole.h map.h tools.h partition.h partition_fixed_costs.h \
+    clocks.h parser.h physical_constants.h physical_constants_cgs.h potential.h version.h \
     hydro_properties.h riemann.h threadpool.h cooling_io.h cooling.h cooling_struct.h \
-    sourceterms.h sourceterms_struct.h statistics.h memswap.h cache.h runner_doiact_vec.h profiler.h \
+    statistics.h memswap.h cache.h runner_doiact_vec.h profiler.h entropy_floor.h \
     dump.h logger.h active.h timeline.h xmf.h gravity_properties.h gravity_derivatives.h \
     gravity_softened_derivatives.h vector_power.h collectgroup.h hydro_space.h sort_part.h \
     chemistry.h chemistry_io.h chemistry_struct.h cosmology.h restart.h space_getsid.h utilities.h \
-    mesh_gravity.h cbrt.h velociraptor_interface.h swift_velociraptor_part.h outputlist.h \
-    memuse.h
+    mesh_gravity.h cbrt.h exp10.h velociraptor_interface.h swift_velociraptor_part.h outputlist.h \
+    logger_io.h tracers_io.h tracers.h tracers_struct.h star_formation_io.h \
+    star_formation_struct.h star_formation.h star_formation_iact.h \
+    velociraptor_struct.h velociraptor_io.h random.h memuse.h
+
+# source files for EAGLE cooling
+EAGLE_COOLING_SOURCES =
+if HAVEEAGLECOOLING
+EAGLE_COOLING_SOURCES += cooling/EAGLE/cooling.c cooling/EAGLE/cooling_tables.c
+endif
 
 # Common source files
-AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
-    serial_io.c timers.c debug.c scheduler.c proxy.c parallel_io.c \
-    units.c common_io.c single_io.c multipole.c version.c map.c \
+AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c engine_maketasks.c \
+    engine_marktasks.c engine_drift.c serial_io.c timers.c debug.c scheduler.c \
+    proxy.c parallel_io.c units.c common_io.c single_io.c multipole.c version.c map.c \
     kernel_hydro.c tools.c part.c partition.c clocks.c parser.c \
     physical_constants.c potential.c hydro_properties.c \
-    threadpool.c cooling.c sourceterms.c \
+    threadpool.c cooling.c star_formation.c \
     statistics.c runner_doiact_vec.c profiler.c dump.c logger.c \
     part_type.c xmf.c gravity_properties.c gravity.c \
     collectgroup.c hydro_space.c equation_of_state.c \
     chemistry.c cosmology.c restart.c mesh_gravity.c velociraptor_interface.c \
-    outputlist.c memuse.c
+    outputlist.c velociraptor_dummy.c logger_io.c memuse.c \
+    $(EAGLE_COOLING_SOURCES)
 
 # Include files for distribution, not installation.
 nobase_noinst_HEADERS = align.h approx_math.h atomic.h barrier.h cycle.h error.h inline.h kernel_hydro.h kernel_gravity.h \
 		 gravity_iact.h kernel_long_gravity.h vector.h cache.h runner_doiact.h runner_doiact_vec.h runner_doiact_grav.h  \
-                 runner_doiact_nosort.h units.h intrinsics.h minmax.h kick.h timestep.h drift.h adiabatic_index.h io_properties.h \
-		 dimension.h part_type.h periodic.h memswap.h dump.h logger.h sign.h \
+                 runner_doiact_nosort.h runner_doiact_stars.h units.h intrinsics.h minmax.h kick.h timestep.h drift.h \
+		 adiabatic_index.h io_properties.h dimension.h part_type.h periodic.h memswap.h dump.h logger.h sign.h \
+		 logger_io.h timestep_limiter.h \
 		 gravity.h gravity_io.h gravity_cache.h \
 		 gravity/Default/gravity.h gravity/Default/gravity_iact.h gravity/Default/gravity_io.h \
 		 gravity/Default/gravity_debug.h gravity/Default/gravity_part.h  \
 		 gravity/Potential/gravity.h gravity/Potential/gravity_iact.h gravity/Potential/gravity_io.h \
 		 gravity/Potential/gravity_debug.h gravity/Potential/gravity_part.h  \
-		 sourceterms.h \
 		 equation_of_state.h \
 		 equation_of_state/ideal_gas/equation_of_state.h equation_of_state/isothermal/equation_of_state.h \
 	 	 hydro.h hydro_io.h \
@@ -127,13 +136,23 @@ nobase_noinst_HEADERS = align.h approx_math.h atomic.h barrier.h cycle.h error.h
 		 riemann/riemann_exact.h riemann/riemann_vacuum.h \
                  riemann/riemann_checks.h \
 	 	 stars.h stars_io.h \
-		 stars/Default/star.h stars/Default/star_iact.h stars/Default/star_io.h \
-		 stars/Default/star_debug.h stars/Default/star_part.h  \
+		 stars/Default/stars.h stars/Default/stars_iact.h stars/Default/stars_io.h \
+		 stars/Default/stars_debug.h stars/Default/stars_part.h  \
+		 stars/EAGLE/stars.h stars/EAGLE/stars_iact.h stars/EAGLE/stars_io.h \
+		 stars/EAGLE/stars_debug.h stars/EAGLE/stars_part.h \
 	         potential/none/potential.h potential/point_mass/potential.h \
                  potential/isothermal/potential.h potential/disc_patch/potential.h \
                  potential/sine_wave/potential.h \
+		 star_formation/none/star_formation.h star_formation/none/star_formation_struct.h \
+		 star_formation/none/star_formation_io.h star_formation/none/star_formation_iact.h \
+		 star_formation/EAGLE/star_formation.h star_formation/EAGLE/star_formation_struct.h \
+		 star_formation/EAGLE/star_formation_io.h star_formation/EAGLE/star_formation_iact.h \
+		 star_formation/GEAR/star_formation.h star_formation/GEAR/star_formation_struct.h \
+		 star_formation/GEAR/star_formation_io.h star_formation/GEAR/star_formation_iact.h \
 		 cooling/none/cooling.h cooling/none/cooling_struct.h \
                  cooling/none/cooling_io.h \
+		 cooling/Compton/cooling.h cooling/Compton/cooling_struct.h \
+                 cooling/Compton/cooling_io.h \
 	         cooling/const_du/cooling.h cooling/const_du/cooling_struct.h \
                  cooling/const_du/cooling_io.h \
                  cooling/const_lambda/cooling.h cooling/const_lambda/cooling_struct.h \
@@ -141,7 +160,7 @@ nobase_noinst_HEADERS = align.h approx_math.h atomic.h barrier.h cycle.h error.h
                  cooling/grackle/cooling.h cooling/grackle/cooling_struct.h \
                  cooling/grackle/cooling_io.h \
 		 cooling/EAGLE/cooling.h cooling/EAGLE/cooling_struct.h \
-                 cooling/EAGLE/cooling_io.h \
+                 cooling/EAGLE/cooling_io.h cooling/EAGLE/interpolate.h cooling/EAGLE/cooling_rates.h \
                  chemistry/none/chemistry.h \
 		 chemistry/none/chemistry_io.h \
 		 chemistry/none/chemistry_struct.h \
@@ -153,7 +172,13 @@ nobase_noinst_HEADERS = align.h approx_math.h atomic.h barrier.h cycle.h error.h
                  chemistry/EAGLE/chemistry.h \
 		 chemistry/EAGLE/chemistry_io.h \
 		 chemistry/EAGLE/chemistry_struct.h\
-		 chemistry/EAGLE/chemistry_iact.h
+		 chemistry/EAGLE/chemistry_iact.h \
+	         entropy_floor/none/entropy_floor.h \
+                 entropy_floor/EAGLE/entropy_floor.h \
+		 tracers/none/tracers.h tracers/none/tracers_struct.h \
+                 tracers/none/tracers_io.h \
+		 tracers/EAGLE/tracers.h tracers/EAGLE/tracers_struct.h \
+                 tracers/EAGLE/tracers_io.h
 
 
 # Sources and flags for regular library
diff --git a/src/active.h b/src/active.h
index 3fe52a86b373ff0b33b88eca0dac9b7c6b58a216..6466cd314fdc18ad324bf01a1ff4e73e214e35d5 100644
--- a/src/active.h
+++ b/src/active.h
@@ -39,15 +39,16 @@ __attribute__((always_inline)) INLINE static int cell_are_part_drifted(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_old_part > e->ti_current)
+  if (c->hydro.ti_old_part > e->ti_current)
     error(
-        "Cell has been drifted too far forward in time! c->ti_old=%lld (t=%e) "
+        "Cell has been drifted too far forward in time! c->ti_old_part=%lld "
+        "(t=%e) "
         "and e->ti_current=%lld (t=%e, a=%e)",
-        c->ti_old_part, c->ti_old_part * e->time_base, e->ti_current,
-        e->ti_current * e->time_base, e->cosmology->a);
+        c->hydro.ti_old_part, c->hydro.ti_old_part * e->time_base,
+        e->ti_current, e->ti_current * e->time_base, e->cosmology->a);
 #endif
 
-  return (c->ti_old_part == e->ti_current);
+  return (c->hydro.ti_old_part == e->ti_current);
 }
 
 /**
@@ -62,15 +63,38 @@ __attribute__((always_inline)) INLINE static int cell_are_gpart_drifted(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_old_gpart > e->ti_current)
+  if (c->grav.ti_old_part > e->ti_current)
     error(
         "Cell has been drifted too far forward in time! c->ti_old=%lld (t=%e) "
         "and e->ti_current=%lld (t=%e)",
-        c->ti_old_gpart, c->ti_old_gpart * e->time_base, e->ti_current,
+        c->grav.ti_old_part, c->grav.ti_old_part * e->time_base, e->ti_current,
         e->ti_current * e->time_base);
 #endif
 
-  return (c->ti_old_gpart == e->ti_current);
+  return (c->grav.ti_old_part == e->ti_current);
+}
+
+/**
+ * @brief Check that the #spart in a #cell have been drifted to the current
+ * time.
+ *
+ * @param c The #cell.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #cell has been drifted to the current time, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int cell_are_spart_drifted(
+    const struct cell *c, const struct engine *e) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->stars.ti_old_part > e->ti_current)
+    error(
+        "Cell has been drifted too far forward in time! c->ti_old=%lld (t=%e) "
+        "and e->ti_current=%lld (t=%e)",
+        c->stars.ti_old_part, c->stars.ti_old_part * e->time_base,
+        e->ti_current, e->ti_current * e->time_base);
+#endif
+
+  return (c->stars.ti_old_part == e->ti_current);
 }
 
 /* Are cells / particles active for regular tasks ? */
@@ -86,15 +110,15 @@ __attribute__((always_inline)) INLINE static int cell_is_active_hydro(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_hydro_end_min < e->ti_current)
+  if (c->hydro.ti_end_min < e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_end_min=%lld (t=%e) and "
         "e->ti_current=%lld (t=%e, a=%e)",
-        c->ti_hydro_end_min, c->ti_hydro_end_min * e->time_base, e->ti_current,
+        c->hydro.ti_end_min, c->hydro.ti_end_min * e->time_base, e->ti_current,
         e->ti_current * e->time_base, e->cosmology->a);
 #endif
 
-  return (c->ti_hydro_end_min == e->ti_current);
+  return (c->hydro.ti_end_min == e->ti_current);
 }
 
 /**
@@ -108,14 +132,14 @@ __attribute__((always_inline)) INLINE static int cell_is_all_active_hydro(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_hydro_end_max < e->ti_current)
+  if (c->hydro.count > 0 && c->hydro.ti_end_max < e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_end_max=%lld "
         "e->ti_current=%lld",
-        c->ti_hydro_end_max, e->ti_current);
+        c->hydro.ti_end_max, e->ti_current);
 #endif
 
-  return (c->ti_hydro_end_max == e->ti_current);
+  return (c->hydro.ti_end_max == e->ti_current);
 }
 
 /**
@@ -129,15 +153,28 @@ __attribute__((always_inline)) INLINE static int cell_is_active_gravity(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_gravity_end_min < e->ti_current)
+  if (c->grav.ti_end_min < e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_end_min=%lld (t=%e) and "
         "e->ti_current=%lld (t=%e, a=%e)",
-        c->ti_gravity_end_min, c->ti_gravity_end_min * e->time_base,
-        e->ti_current, e->ti_current * e->time_base, e->cosmology->a);
+        c->grav.ti_end_min, c->grav.ti_end_min * e->time_base, e->ti_current,
+        e->ti_current * e->time_base, e->cosmology->a);
 #endif
 
-  return (c->ti_gravity_end_min == e->ti_current);
+  return (c->grav.ti_end_min == e->ti_current);
+}
+
+/**
+ * @brief Does a cell contain any multipole requiring calculation ?
+ *
+ * @param c The #cell.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #cell contains at least an active particle, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int cell_is_active_gravity_mm(
+    const struct cell *c, const struct engine *e) {
+
+  return (c->grav.ti_end_min == e->ti_current);
 }
 
 /**
@@ -151,14 +188,36 @@ __attribute__((always_inline)) INLINE static int cell_is_all_active_gravity(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_gravity_end_max < e->ti_current)
+  if (c->grav.count > 0 && c->grav.ti_end_max < e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_end_max=%lld "
         "e->ti_current=%lld",
-        c->ti_gravity_end_max, e->ti_current);
+        c->grav.ti_end_max, e->ti_current);
 #endif
 
-  return (c->ti_gravity_end_max == e->ti_current);
+  return (c->grav.ti_end_max == e->ti_current);
+}
+
+/**
+ * @brief Does a cell contain any s-particle finishing their time-step now ?
+ *
+ * @param c The #cell.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #cell contains at least an active particle, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int cell_is_active_stars(
+    const struct cell *c, const struct engine *e) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->stars.ti_end_min < e->ti_current)
+    error(
+        "cell in an impossible time-zone! c->ti_end_min=%lld (t=%e) and "
+        "e->ti_current=%lld (t=%e, a=%e)",
+        c->stars.ti_end_min, c->stars.ti_end_min * e->time_base, e->ti_current,
+        e->ti_current * e->time_base, e->cosmology->a);
+#endif
+
+  return (c->stars.ti_end_min == e->ti_current);
 }
 
 /**
@@ -249,6 +308,42 @@ __attribute__((always_inline)) INLINE static int spart_is_active(
   return (spart_bin <= max_active_bin);
 }
 
+/**
+ * @brief Has this particle been inhibited?
+ *
+ * @param p The #part.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #part is inhibited, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int part_is_inhibited(
+    const struct part *p, const struct engine *e) {
+  return p->time_bin == time_bin_inhibited;
+}
+
+/**
+ * @brief Has this gravity particle been inhibited?
+ *
+ * @param gp The #gpart.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #gpart is inhibited, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int gpart_is_inhibited(
+    const struct gpart *gp, const struct engine *e) {
+  return gp->time_bin == time_bin_inhibited;
+}
+
+/**
+ * @brief Has this star particle been inhibited?
+ *
+ * @param sp The #spart.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #spart is inhibited, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int spart_is_inhibited(
+    const struct spart *sp, const struct engine *e) {
+  return sp->time_bin == time_bin_inhibited;
+}
+
 /* Are cells / particles active for kick1 tasks ? */
 
 /**
@@ -262,15 +357,15 @@ __attribute__((always_inline)) INLINE static int cell_is_starting_hydro(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_hydro_beg_max > e->ti_current)
+  if (c->hydro.ti_beg_max > e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_beg_max=%lld (t=%e) and "
         "e->ti_current=%lld (t=%e, a=%e)",
-        c->ti_hydro_beg_max, c->ti_hydro_beg_max * e->time_base, e->ti_current,
+        c->hydro.ti_beg_max, c->hydro.ti_beg_max * e->time_base, e->ti_current,
         e->ti_current * e->time_base, e->cosmology->a);
 #endif
 
-  return (c->ti_hydro_beg_max == e->ti_current);
+  return (c->hydro.ti_beg_max == e->ti_current);
 }
 
 /**
@@ -284,15 +379,37 @@ __attribute__((always_inline)) INLINE static int cell_is_starting_gravity(
     const struct cell *c, const struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_gravity_beg_max > e->ti_current)
+  if (c->grav.ti_beg_max > e->ti_current)
     error(
         "cell in an impossible time-zone! c->ti_beg_max=%lld (t=%e) and "
         "e->ti_current=%lld (t=%e, a=%e)",
-        c->ti_gravity_beg_max, c->ti_gravity_beg_max * e->time_base,
-        e->ti_current, e->ti_current * e->time_base, e->cosmology->a);
+        c->grav.ti_beg_max, c->grav.ti_beg_max * e->time_base, e->ti_current,
+        e->ti_current * e->time_base, e->cosmology->a);
 #endif
 
-  return (c->ti_gravity_beg_max == e->ti_current);
+  return (c->grav.ti_beg_max == e->ti_current);
+}
+
+/**
+ * @brief Does a cell contain any s-particle starting their time-step now ?
+ *
+ * @param c The #cell.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #cell contains at least an active particle, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int cell_is_starting_stars(
+    const struct cell *c, const struct engine *e) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->stars.ti_beg_max > e->ti_current)
+    error(
+        "cell in an impossible time-zone! c->ti_beg_max=%lld (t=%e) and "
+        "e->ti_current=%lld (t=%e, a=%e)",
+        c->stars.ti_beg_max, c->stars.ti_beg_max * e->time_base, e->ti_current,
+        e->ti_current * e->time_base, e->cosmology->a);
+#endif
+
+  return (c->stars.ti_beg_max == e->ti_current);
 }
 
 /**
@@ -378,4 +495,5 @@ __attribute__((always_inline)) INLINE static int spart_is_starting(
 
   return (spart_bin <= max_active_bin);
 }
+
 #endif /* SWIFT_ACTIVE_H */
diff --git a/src/align.h b/src/align.h
index 6d329ae7983d68aee096f6f9e65990d5fed6a0f2..24ff0828b09855f31c187b655b1d751e78af8769 100644
--- a/src/align.h
+++ b/src/align.h
@@ -44,6 +44,8 @@
  * alignment.
  *
  * Note that this turns into a no-op but gives information to the compiler.
+ * For GCC versions older than 4.7 this is ignored as the builtin does not
+ * exist.
  *
  * @param type The type of the array.
  * @param array The array.
@@ -52,11 +54,11 @@
 #if defined(__ICC)
 #define swift_align_information(type, array, alignment) \
   __assume_aligned(array, alignment);
-#elif defined(__GNUC__)
+#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 6)
 #define swift_align_information(type, array, alignment) \
   array = (type *)__builtin_assume_aligned(array, alignment);
 #else
-#define swift_align_information(array, alignment) ;
+#define swift_align_information(type, array, alignment) ;
 #endif
 
 /**
diff --git a/src/approx_math.h b/src/approx_math.h
index 90ea4eb997c71311e0c1ce854bbdd0a0ba7396ce..f347bab44790d1e3120675bcbd6e7a457ca09821 100644
--- a/src/approx_math.h
+++ b/src/approx_math.h
@@ -21,6 +21,31 @@
 
 #include "inline.h"
 
+/**
+ * @brief Approximate version of the complementary error function erfcf(x).
+ *
+ * This is based on eq. 7.1.27 of Abramowitz & Stegun, 1972.
+ * The absolute error is < 4.7*10^-4 over the range 0 < x < infinity.
+ *
+ * Returns garbage for x < 0.
+ * @param x The number to compute erfc for.
+ */
+__attribute__((always_inline, const)) INLINE static float approx_erfcf(
+    float x) {
+
+  /* 1 + 0.278393*x + 0.230389*x^2 + 0.000972*x^3 + 0.078108*x^4 */
+  float arg = 0.078108f;
+  arg = x * arg + 0.000972f;
+  arg = x * arg + 0.230389f;
+  arg = x * arg + 0.278393f;
+  arg = x * arg + 1.f;
+
+  /* 1 / arg^4 */
+  const float arg2 = arg * arg;
+  const float arg4 = arg2 * arg2;
+  return 1.f / arg4;
+}
+
 /**
  * @brief Approximate version of expf(x) using a 4th order Taylor expansion
  *
diff --git a/src/atomic.h b/src/atomic.h
index 69df59e9fba965422eaf9a3b3de9d28ab9f09dad..10548c6a20249b4b0c362c5e6ab78ea5d85b2091 100644
--- a/src/atomic.h
+++ b/src/atomic.h
@@ -127,4 +127,36 @@ __attribute__((always_inline)) INLINE static void atomic_add_f(
   } while (test_val.as_int != old_val.as_int);
 }
 
+/**
+ * @brief Atomic add operation on doubles.
+ *
+ * This is a text-book implementation based on an atomic CAS.
+ *
+ * We create a temporary union to cope with the int-only atomic CAS
+ * and the floating-point add that we want.
+ *
+ * @param address The address to update.
+ * @param y The value to update the address with.
+ */
+__attribute__((always_inline)) INLINE static void atomic_add_d(
+    volatile double *const address, const double y) {
+
+  long long *const long_long_ptr = (long long *)address;
+
+  typedef union {
+    double as_double;
+    long long as_long_long;
+  } cast_type;
+
+  cast_type test_val, old_val, new_val;
+  old_val.as_double = *address;
+
+  do {
+    test_val.as_long_long = old_val.as_long_long;
+    new_val.as_double = old_val.as_double + y;
+    old_val.as_long_long =
+        atomic_cas(long_long_ptr, test_val.as_long_long, new_val.as_long_long);
+  } while (test_val.as_long_long != old_val.as_long_long);
+}
+
 #endif /* SWIFT_ATOMIC_H */
diff --git a/src/cache.h b/src/cache.h
index c41e11c34246ef0de93bb1ae7500277aab555b9e..e5a62f33b3eb492f9da6e0e98ed767d6b8de32dd 100644
--- a/src/cache.h
+++ b/src/cache.h
@@ -123,8 +123,7 @@ __attribute__((always_inline)) INLINE void cache_init(struct cache *c,
                                                       size_t count) {
 
   /* Align cache on correct byte boundary and pad cache size to be a multiple of
-   * the vector size
-   * and include 2 vector lengths for remainder operations. */
+   * the vector size and include 2 vector lengths for remainder operations. */
   size_t pad = 2 * VEC_SIZE, rem = count % VEC_SIZE;
   if (rem > 0) pad += VEC_SIZE - rem;
   size_t sizeBytes = (count + pad) * sizeof(float);
@@ -179,8 +178,9 @@ __attribute__((always_inline)) INLINE void cache_init(struct cache *c,
  *
  * @param ci The #cell.
  * @param ci_cache The cache.
+ * @return The cache particle count padded up to a multiple of the vector size.
  */
-__attribute__((always_inline)) INLINE void cache_read_particles(
+__attribute__((always_inline)) INLINE int cache_read_particles(
     const struct cell *restrict const ci,
     struct cache *restrict const ci_cache) {
 
@@ -197,12 +197,29 @@ __attribute__((always_inline)) INLINE void cache_read_particles(
   swift_declare_aligned_ptr(float, vy, ci_cache->vy, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vz, ci_cache->vz, SWIFT_CACHE_ALIGNMENT);
 
-  const struct part *restrict parts = ci->parts;
+  const int count = ci->hydro.count;
+  const struct part *restrict parts = ci->hydro.parts;
   const double loc[3] = {ci->loc[0], ci->loc[1], ci->loc[2]};
+  const double max_dx = ci->hydro.dx_max_part;
+  const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
+                               -(2. * ci->width[1] + max_dx),
+                               -(2. * ci->width[2] + max_dx)};
+  const float h_padded = ci->hydro.h_max / 4.;
 
   /* Shift the particles positions to a local frame so single precision can be
    * used instead of double precision. */
-  for (int i = 0; i < ci->count; i++) {
+  for (int i = 0; i < count; i++) {
+
+    /* Pad inhibited particles. */
+    if (parts[i].time_bin >= time_bin_inhibited) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+      h[i] = h_padded;
+
+      continue;
+    }
+
     x[i] = (float)(parts[i].x[0] - loc[0]);
     y[i] = (float)(parts[i].x[1] - loc[1]);
     z[i] = (float)(parts[i].x[2] - loc[2]);
@@ -213,6 +230,103 @@ __attribute__((always_inline)) INLINE void cache_read_particles(
     vz[i] = parts[i].v[2];
   }
 
+  /* Pad cache if the no. of particles is not a multiple of double the vector
+   * length. */
+  int count_align = count;
+  const int rem = count % (NUM_VEC_PROC * VEC_SIZE);
+  if (rem != 0) {
+    count_align += (NUM_VEC_PROC * VEC_SIZE) - rem;
+
+    /* Set positions to something outside of the range of any particle */
+    for (int i = count; i < count_align; i++) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+    }
+  }
+
+  return count_align;
+
+#else
+  error("Can't call the cache reading function with this flavour of SPH!");
+  return 0;
+#endif
+}
+
+/**
+ * @brief Populate cache by reading in the particles in unsorted order for
+ * doself_subset.
+ *
+ * @param ci The #cell.
+ * @param ci_cache The cache.
+ * @return The cache particle count padded up to a multiple of the vector size.
+ */
+__attribute__((always_inline)) INLINE int cache_read_particles_subset_self(
+    const struct cell *restrict const ci,
+    struct cache *restrict const ci_cache) {
+
+#if defined(GADGET2_SPH)
+
+  /* Let the compiler know that the data is aligned and create pointers to the
+   * arrays inside the cache. */
+  swift_declare_aligned_ptr(float, x, ci_cache->x, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, y, ci_cache->y, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, z, ci_cache->z, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, m, ci_cache->m, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, vx, ci_cache->vx, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, vy, ci_cache->vy, SWIFT_CACHE_ALIGNMENT);
+  swift_declare_aligned_ptr(float, vz, ci_cache->vz, SWIFT_CACHE_ALIGNMENT);
+
+  const int count = ci->hydro.count;
+  const struct part *restrict parts = ci->hydro.parts;
+  const double loc[3] = {ci->loc[0], ci->loc[1], ci->loc[2]};
+  const double max_dx = ci->hydro.dx_max_part;
+  const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
+                               -(2. * ci->width[1] + max_dx),
+                               -(2. * ci->width[2] + max_dx)};
+
+  /* Shift the particles positions to a local frame so single precision can be
+   * used instead of double precision. */
+  for (int i = 0; i < count; i++) {
+
+    /* Pad inhibited particles. */
+    if (parts[i].time_bin >= time_bin_inhibited) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+
+      continue;
+    }
+
+    x[i] = (float)(parts[i].x[0] - loc[0]);
+    y[i] = (float)(parts[i].x[1] - loc[1]);
+    z[i] = (float)(parts[i].x[2] - loc[2]);
+    m[i] = parts[i].mass;
+    vx[i] = parts[i].v[0];
+    vy[i] = parts[i].v[1];
+    vz[i] = parts[i].v[2];
+  }
+
+  /* Pad cache if the no. of particles is not a multiple of double the vector
+   * length. */
+  int count_align = count;
+  const int rem = count % (NUM_VEC_PROC * VEC_SIZE);
+  if (rem != 0) {
+    count_align += (NUM_VEC_PROC * VEC_SIZE) - rem;
+
+    /* Set positions to something outside of the range of any particle */
+    for (int i = count; i < count_align; i++) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+    }
+  }
+
+  return count_align;
+
+#else
+  error("Can't call the cache reading function with this flavour of SPH!");
+  return 0;
 #endif
 }
 
@@ -230,7 +344,7 @@ __attribute__((always_inline)) INLINE void cache_read_particles(
  * @param loc The cell location to remove from the particle positions.
  * @param flipped Flag to check whether the cells have been flipped or not.
  */
-__attribute__((always_inline)) INLINE void cache_read_particles_subset(
+__attribute__((always_inline)) INLINE void cache_read_particles_subset_pair(
     const struct cell *restrict const ci, struct cache *restrict const ci_cache,
     const struct entry *restrict sort_i, int *first_pi, int *last_pi,
     const double *loc, const int flipped) {
@@ -242,13 +356,12 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
   swift_declare_aligned_ptr(float, x, ci_cache->x, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, y, ci_cache->y, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, z, ci_cache->z, SWIFT_CACHE_ALIGNMENT);
-  swift_declare_aligned_ptr(float, h, ci_cache->h, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, m, ci_cache->m, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vx, ci_cache->vx, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vy, ci_cache->vy, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vz, ci_cache->vz, SWIFT_CACHE_ALIGNMENT);
 
-  const struct part *restrict parts = ci->parts;
+  const struct part *restrict parts = ci->hydro.parts;
 
   /* The cell is on the right so read the particles
    * into the cache from the start of the cell. */
@@ -258,17 +371,35 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
       const int pad = VEC_SIZE - rem;
 
       /* Increase last_pi if there are particles in the cell left to read. */
-      if (*last_pi + pad < ci->count) *last_pi += pad;
+      if (*last_pi + pad < ci->hydro.count) *last_pi += pad;
     }
 
+    const double max_dx = ci->hydro.dx_max_part;
+    const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
+                                 -(2. * ci->width[1] + max_dx),
+                                 -(2. * ci->width[2] + max_dx)};
+
     /* Shift the particles positions to a local frame so single precision can be
      * used instead of double precision. */
     for (int i = 0; i < *last_pi; i++) {
       const int idx = sort_i[i].i;
+
+      /* Put inhibited particles out of range. */
+      if (parts[idx].time_bin >= time_bin_inhibited) {
+        x[i] = pos_padded[0];
+        y[i] = pos_padded[1];
+        z[i] = pos_padded[2];
+        m[i] = 1.f;
+        vx[i] = 1.f;
+        vy[i] = 1.f;
+        vz[i] = 1.f;
+
+        continue;
+      }
+
       x[i] = (float)(parts[idx].x[0] - loc[0]);
       y[i] = (float)(parts[idx].x[1] - loc[1]);
       z[i] = (float)(parts[idx].x[2] - loc[2]);
-      h[i] = parts[idx].h;
       m[i] = parts[idx].mass;
       vx[i] = parts[idx].v[0];
       vy[i] = parts[idx].v[1];
@@ -278,17 +409,10 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
     /* Pad cache with fake particles that exist outside the cell so will not
      * interact. We use values of the same magnitude (but negative!) as the real
      * particles to avoid overflow problems. */
-    const double max_dx = ci->dx_max_part;
-    const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
-                                 -(2. * ci->width[1] + max_dx),
-                                 -(2. * ci->width[2] + max_dx)};
-    const float h_padded = ci->parts[0].h;
-
     for (int i = *last_pi; i < *last_pi + VEC_SIZE; i++) {
       x[i] = pos_padded[0];
       y[i] = pos_padded[1];
       z[i] = pos_padded[2];
-      h[i] = h_padded;
 
       m[i] = 1.f;
       vx[i] = 1.f;
@@ -299,7 +423,7 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
   /* The cell is on the left so read the particles
    * into the cache from the end of the cell. */
   else {
-    const int rem = (ci->count - *first_pi) % VEC_SIZE;
+    const int rem = (ci->hydro.count - *first_pi) % VEC_SIZE;
     if (rem != 0) {
       const int pad = VEC_SIZE - rem;
 
@@ -307,16 +431,34 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
       if (*first_pi - pad >= 0) *first_pi -= pad;
     }
 
-    const int ci_cache_count = ci->count - *first_pi;
+    const int ci_cache_count = ci->hydro.count - *first_pi;
+    const double max_dx = ci->hydro.dx_max_part;
+    const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
+                                 -(2. * ci->width[1] + max_dx),
+                                 -(2. * ci->width[2] + max_dx)};
 
     /* Shift the particles positions to a local frame so single precision can be
      * used instead of double precision. */
     for (int i = 0; i < ci_cache_count; i++) {
       const int idx = sort_i[i + *first_pi].i;
+
+      /* Put inhibited particles out of range. */
+      if (parts[idx].time_bin >= time_bin_inhibited) {
+        x[i] = pos_padded[0];
+        y[i] = pos_padded[1];
+        z[i] = pos_padded[2];
+
+        m[i] = 1.f;
+        vx[i] = 1.f;
+        vy[i] = 1.f;
+        vz[i] = 1.f;
+
+        continue;
+      }
+
       x[i] = (float)(parts[idx].x[0] - loc[0]);
       y[i] = (float)(parts[idx].x[1] - loc[1]);
       z[i] = (float)(parts[idx].x[2] - loc[2]);
-      h[i] = parts[idx].h;
       m[i] = parts[idx].mass;
       vx[i] = parts[idx].v[0];
       vy[i] = parts[idx].v[1];
@@ -326,18 +468,11 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
     /* Pad cache with fake particles that exist outside the cell so will not
      * interact. We use values of the same magnitude (but negative!) as the real
      * particles to avoid overflow problems. */
-    const double max_dx = ci->dx_max_part;
-    const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
-                                 -(2. * ci->width[1] + max_dx),
-                                 -(2. * ci->width[2] + max_dx)};
-    const float h_padded = ci->parts[0].h;
-
-    for (int i = ci->count - *first_pi; i < ci->count - *first_pi + VEC_SIZE;
-         i++) {
+    for (int i = ci->hydro.count - *first_pi;
+         i < ci->hydro.count - *first_pi + VEC_SIZE; i++) {
       x[i] = pos_padded[0];
       y[i] = pos_padded[1];
       z[i] = pos_padded[2];
-      h[i] = h_padded;
 
       m[i] = 1.f;
       vx[i] = 1.f;
@@ -355,8 +490,9 @@ __attribute__((always_inline)) INLINE void cache_read_particles_subset(
  *
  * @param ci The #cell.
  * @param ci_cache The cache.
+ * @return The cache particle count padded up to a multiple of the vector size.
  */
-__attribute__((always_inline)) INLINE void cache_read_force_particles(
+__attribute__((always_inline)) INLINE int cache_read_force_particles(
     const struct cell *restrict const ci,
     struct cache *restrict const ci_cache) {
 
@@ -382,12 +518,34 @@ __attribute__((always_inline)) INLINE void cache_read_force_particles(
   swift_declare_aligned_ptr(float, soundspeed, ci_cache->soundspeed,
                             SWIFT_CACHE_ALIGNMENT);
 
-  const struct part *restrict parts = ci->parts;
+  const int count = ci->hydro.count;
+  const struct part *restrict parts = ci->hydro.parts;
   const double loc[3] = {ci->loc[0], ci->loc[1], ci->loc[2]};
+  const double max_dx = ci->hydro.dx_max_part;
+  const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
+                               -(2. * ci->width[1] + max_dx),
+                               -(2. * ci->width[2] + max_dx)};
+  const float h_padded = ci->hydro.h_max / 4.;
 
   /* Shift the particles positions to a local frame so single precision can be
    * used instead of double precision. */
-  for (int i = 0; i < ci->count; i++) {
+  for (int i = 0; i < count; i++) {
+
+    /* Skip inhibited particles. */
+    if (parts[i].time_bin >= time_bin_inhibited) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+      h[i] = h_padded;
+      rho[i] = 1.f;
+      grad_h[i] = 1.f;
+      pOrho2[i] = 1.f;
+      balsara[i] = 1.f;
+      soundspeed[i] = 1.f;
+
+      continue;
+    }
+
     x[i] = (float)(parts[i].x[0] - loc[0]);
     y[i] = (float)(parts[i].x[1] - loc[1]);
     z[i] = (float)(parts[i].x[2] - loc[2]);
@@ -403,6 +561,32 @@ __attribute__((always_inline)) INLINE void cache_read_force_particles(
     soundspeed[i] = parts[i].force.soundspeed;
   }
 
+  /* Pad cache if there is a serial remainder. */
+  int count_align = count;
+  const int rem = count % VEC_SIZE;
+  if (rem != 0) {
+    count_align += VEC_SIZE - rem;
+
+    /* Set positions to the same as particle pi so when the r2 > 0 mask is
+     * applied these extra contributions are masked out.*/
+    for (int i = count; i < count_align; i++) {
+      x[i] = pos_padded[0];
+      y[i] = pos_padded[1];
+      z[i] = pos_padded[2];
+      h[i] = h_padded;
+      rho[i] = 1.f;
+      grad_h[i] = 1.f;
+      pOrho2[i] = 1.f;
+      balsara[i] = 1.f;
+      soundspeed[i] = 1.f;
+    }
+  }
+
+  return count_align;
+
+#else
+  error("Can't call the cache reading function with this flavour of SPH!");
+  return 0;
 #endif
 }
 
@@ -433,7 +617,7 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
    * cache. */
 
   /* Is the number of particles to read a multiple of the vector size? */
-  int rem = (ci->count - *first_pi) % VEC_SIZE;
+  int rem = (ci->hydro.count - *first_pi) % VEC_SIZE;
   if (rem != 0) {
     int pad = VEC_SIZE - rem;
 
@@ -446,14 +630,14 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
     int pad = VEC_SIZE - rem;
 
     /* Increase last_pj if there are particles in the cell left to read. */
-    if (*last_pj + pad < cj->count) *last_pj += pad;
+    if (*last_pj + pad < cj->hydro.count) *last_pj += pad;
   }
 
   /* Get some local pointers */
   const int first_pi_align = *first_pi;
   const int last_pj_align = *last_pj;
-  const struct part *restrict parts_i = ci->parts;
-  const struct part *restrict parts_j = cj->parts;
+  const struct part *restrict parts_i = ci->hydro.parts;
+  const struct part *restrict parts_j = cj->hydro.parts;
 
   /* Shift particles to the local frame and account for boundary conditions.*/
   const double total_ci_shift[3] = {
@@ -471,12 +655,33 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
   swift_declare_aligned_ptr(float, vy, ci_cache->vy, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vz, ci_cache->vz, SWIFT_CACHE_ALIGNMENT);
 
-  int ci_cache_count = ci->count - first_pi_align;
+  int ci_cache_count = ci->hydro.count - first_pi_align;
+  const double max_dx = max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
+  const float pos_padded_i[3] = {-(2. * ci->width[0] + max_dx),
+                                 -(2. * ci->width[1] + max_dx),
+                                 -(2. * ci->width[2] + max_dx)};
+  const float h_padded_i = ci->hydro.h_max / 4.;
 
   /* Shift the particles positions to a local frame (ci frame) so single
    * precision can be used instead of double precision.  */
   for (int i = 0; i < ci_cache_count; i++) {
     const int idx = sort_i[i + first_pi_align].i;
+
+    /* Put inhibited particles out of range. */
+    if (parts_i[idx].time_bin >= time_bin_inhibited) {
+      x[i] = pos_padded_i[0];
+      y[i] = pos_padded_i[1];
+      z[i] = pos_padded_i[2];
+      h[i] = h_padded_i;
+
+      m[i] = 1.f;
+      vx[i] = 1.f;
+      vy[i] = 1.f;
+      vz[i] = 1.f;
+
+      continue;
+    }
+
     x[i] = (float)(parts_i[idx].x[0] - total_ci_shift[0]);
     y[i] = (float)(parts_i[idx].x[1] - total_ci_shift[1]);
     z[i] = (float)(parts_i[idx].x[2] - total_ci_shift[2]);
@@ -491,11 +696,14 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
 
 #ifdef SWIFT_DEBUG_CHECKS
   const float shift_threshold_x =
-      2. * ci->width[0] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[0] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_y =
-      2. * ci->width[1] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[1] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_z =
-      2. * ci->width[2] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[2] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
 
   /* Make sure that particle positions have been shifted correctly. */
   for (int i = 0; i < ci_cache_count; i++) {
@@ -529,18 +737,12 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
   /* Pad cache with fake particles that exist outside the cell so will not
    * interact. We use values of the same magnitude (but negative!) as the real
    * particles to avoid overflow problems. */
-  const double max_dx = max(ci->dx_max_part, cj->dx_max_part);
-  const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
-                               -(2. * ci->width[1] + max_dx),
-                               -(2. * ci->width[2] + max_dx)};
-  const float h_padded = ci->parts[0].h;
-
-  for (int i = ci->count - first_pi_align;
-       i < ci->count - first_pi_align + VEC_SIZE; i++) {
-    x[i] = pos_padded[0];
-    y[i] = pos_padded[1];
-    z[i] = pos_padded[2];
-    h[i] = h_padded;
+  for (int i = ci->hydro.count - first_pi_align;
+       i < ci->hydro.count - first_pi_align + VEC_SIZE; i++) {
+    x[i] = pos_padded_i[0];
+    y[i] = pos_padded_i[1];
+    z[i] = pos_padded_i[2];
+    h[i] = h_padded_i;
 
     m[i] = 1.f;
     vx[i] = 1.f;
@@ -559,8 +761,29 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
   swift_declare_aligned_ptr(float, vyj, cj_cache->vy, SWIFT_CACHE_ALIGNMENT);
   swift_declare_aligned_ptr(float, vzj, cj_cache->vz, SWIFT_CACHE_ALIGNMENT);
 
+  const float pos_padded_j[3] = {-(2. * cj->width[0] + max_dx),
+                                 -(2. * cj->width[1] + max_dx),
+                                 -(2. * cj->width[2] + max_dx)};
+  const float h_padded_j = cj->hydro.h_max / 4.;
+
   for (int i = 0; i <= last_pj_align; i++) {
     const int idx = sort_j[i].i;
+
+    /* Put inhibited particles out of range. */
+    if (parts_j[idx].time_bin >= time_bin_inhibited) {
+      xj[i] = pos_padded_j[0];
+      yj[i] = pos_padded_j[1];
+      zj[i] = pos_padded_j[2];
+      hj[i] = h_padded_j;
+
+      mj[i] = 1.f;
+      vxj[i] = 1.f;
+      vyj[i] = 1.f;
+      vzj[i] = 1.f;
+
+      continue;
+    }
+
     xj[i] = (float)(parts_j[idx].x[0] - total_cj_shift[0]);
     yj[i] = (float)(parts_j[idx].x[1] - total_cj_shift[1]);
     zj[i] = (float)(parts_j[idx].x[2] - total_cj_shift[2]);
@@ -606,11 +829,6 @@ __attribute__((always_inline)) INLINE void cache_read_two_partial_cells_sorted(
   /* Pad cache with fake particles that exist outside the cell so will not
    * interact. We use values of the same magnitude (but negative!) as the real
    * particles to avoid overflow problems. */
-  const float pos_padded_j[3] = {-(2. * cj->width[0] + max_dx),
-                                 -(2. * cj->width[1] + max_dx),
-                                 -(2. * cj->width[2] + max_dx)};
-  const float h_padded_j = cj->parts[0].h;
-
   for (int i = last_pj_align + 1; i < last_pj_align + 1 + VEC_SIZE; i++) {
     xj[i] = pos_padded_j[0];
     yj[i] = pos_padded_j[1];
@@ -650,7 +868,7 @@ cache_read_two_partial_cells_sorted_force(
    * cache. */
 
   /* Is the number of particles to read a multiple of the vector size? */
-  int rem = (ci->count - *first_pi) % VEC_SIZE;
+  int rem = (ci->hydro.count - *first_pi) % VEC_SIZE;
   if (rem != 0) {
     int pad = VEC_SIZE - rem;
 
@@ -663,14 +881,14 @@ cache_read_two_partial_cells_sorted_force(
     int pad = VEC_SIZE - rem;
 
     /* Increase last_pj if there are particles in the cell left to read. */
-    if (*last_pj + pad < cj->count) *last_pj += pad;
+    if (*last_pj + pad < cj->hydro.count) *last_pj += pad;
   }
 
   /* Get some local pointers */
   const int first_pi_align = *first_pi;
   const int last_pj_align = *last_pj;
-  const struct part *restrict parts_i = ci->parts;
-  const struct part *restrict parts_j = cj->parts;
+  const struct part *restrict parts_i = ci->hydro.parts;
+  const struct part *restrict parts_j = cj->hydro.parts;
 
   /* Shift particles to the local frame and account for boundary conditions.*/
   const double total_ci_shift[3] = {
@@ -697,12 +915,38 @@ cache_read_two_partial_cells_sorted_force(
   swift_declare_aligned_ptr(float, soundspeed, ci_cache->soundspeed,
                             SWIFT_CACHE_ALIGNMENT);
 
-  int ci_cache_count = ci->count - first_pi_align;
+  int ci_cache_count = ci->hydro.count - first_pi_align;
+  const double max_dx = max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
+  const float pos_padded_i[3] = {-(2. * ci->width[0] + max_dx),
+                                 -(2. * ci->width[1] + max_dx),
+                                 -(2. * ci->width[2] + max_dx)};
+  const float h_padded_i = ci->hydro.h_max / 4.;
+
   /* Shift the particles positions to a local frame (ci frame) so single
    * precision can be  used instead of double precision.  */
   for (int i = 0; i < ci_cache_count; i++) {
 
     const int idx = sort_i[i + first_pi_align].i;
+
+    /* Put inhibited particles out of range. */
+    if (parts_i[idx].time_bin >= time_bin_inhibited) {
+      x[i] = pos_padded_i[0];
+      y[i] = pos_padded_i[1];
+      z[i] = pos_padded_i[2];
+      h[i] = h_padded_i;
+      m[i] = 1.f;
+      vx[i] = 1.f;
+      vy[i] = 1.f;
+      vz[i] = 1.f;
+      rho[i] = 1.f;
+      grad_h[i] = 1.f;
+      pOrho2[i] = 1.f;
+      balsara[i] = 1.f;
+      soundspeed[i] = 1.f;
+
+      continue;
+    }
+
     x[i] = (float)(parts_i[idx].x[0] - total_ci_shift[0]);
     y[i] = (float)(parts_i[idx].x[1] - total_ci_shift[1]);
     z[i] = (float)(parts_i[idx].x[2] - total_ci_shift[2]);
@@ -723,18 +967,12 @@ cache_read_two_partial_cells_sorted_force(
   /* Pad cache with fake particles that exist outside the cell so will not
    * interact. We use values of the same magnitude (but negative!) as the real
    * particles to avoid overflow problems. */
-  const double max_dx = max(ci->dx_max_part, cj->dx_max_part);
-  const float pos_padded[3] = {-(2. * ci->width[0] + max_dx),
-                               -(2. * ci->width[1] + max_dx),
-                               -(2. * ci->width[2] + max_dx)};
-  const float h_padded = ci->parts[0].h;
-
-  for (int i = ci->count - first_pi_align;
-       i < ci->count - first_pi_align + VEC_SIZE; i++) {
-    x[i] = pos_padded[0];
-    y[i] = pos_padded[1];
-    z[i] = pos_padded[2];
-    h[i] = h_padded;
+  for (int i = ci->hydro.count - first_pi_align;
+       i < ci->hydro.count - first_pi_align + VEC_SIZE; i++) {
+    x[i] = pos_padded_i[0];
+    y[i] = pos_padded_i[1];
+    z[i] = pos_padded_i[2];
+    h[i] = h_padded_i;
     m[i] = 1.f;
     vx[i] = 1.f;
     vy[i] = 1.f;
@@ -766,8 +1004,33 @@ cache_read_two_partial_cells_sorted_force(
   swift_declare_aligned_ptr(float, soundspeedj, cj_cache->soundspeed,
                             SWIFT_CACHE_ALIGNMENT);
 
+  const float pos_padded_j[3] = {-(2. * cj->width[0] + max_dx),
+                                 -(2. * cj->width[1] + max_dx),
+                                 -(2. * cj->width[2] + max_dx)};
+  const float h_padded_j = cj->hydro.h_max / 4.;
+
   for (int i = 0; i <= last_pj_align; i++) {
     const int idx = sort_j[i].i;
+
+    /* Put inhibited particles out of range. */
+    if (parts_j[idx].time_bin >= time_bin_inhibited) {
+      xj[i] = pos_padded_j[0];
+      yj[i] = pos_padded_j[1];
+      zj[i] = pos_padded_j[2];
+      hj[i] = h_padded_j;
+      mj[i] = 1.f;
+      vxj[i] = 1.f;
+      vyj[i] = 1.f;
+      vzj[i] = 1.f;
+      rhoj[i] = 1.f;
+      grad_hj[i] = 1.f;
+      pOrho2j[i] = 1.f;
+      balsaraj[i] = 1.f;
+      soundspeedj[i] = 1.f;
+
+      continue;
+    }
+
     xj[i] = (float)(parts_j[idx].x[0] - total_cj_shift[0]);
     yj[i] = (float)(parts_j[idx].x[1] - total_cj_shift[1]);
     zj[i] = (float)(parts_j[idx].x[2] - total_cj_shift[2]);
@@ -788,11 +1051,6 @@ cache_read_two_partial_cells_sorted_force(
   /* Pad cache with fake particles that exist outside the cell so will not
    * interact. We use values of the same magnitude (but negative!) as the real
    * particles to avoid overflow problems. */
-  const float pos_padded_j[3] = {-(2. * cj->width[0] + max_dx),
-                                 -(2. * cj->width[1] + max_dx),
-                                 -(2. * cj->width[2] + max_dx)};
-  const float h_padded_j = cj->parts[0].h;
-
   for (int i = last_pj_align + 1; i < last_pj_align + 1 + VEC_SIZE; i++) {
     xj[i] = pos_padded_j[0];
     yj[i] = pos_padded_j[1];
@@ -810,7 +1068,8 @@ cache_read_two_partial_cells_sorted_force(
   }
 }
 
-/* @brief Clean the memory allocated by a #cache object.
+/**
+ * @brief Clean the memory allocated by a #cache object.
  *
  * @param c The #cache to clean.
  */
@@ -831,6 +1090,7 @@ static INLINE void cache_clean(struct cache *c) {
     free(c->balsara);
     free(c->soundspeed);
   }
+  c->count = 0;
 }
 
 #endif /* WITH_VECTORIZATION */
diff --git a/src/cell.c b/src/cell.c
index f7a374e293e13e108b14c733ec066028f8a33c7b..9fd32ed8c6402d51358bcdc888aefaad16abe7c2 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -61,8 +61,10 @@
 #include "scheduler.h"
 #include "space.h"
 #include "space_getsid.h"
+#include "stars.h"
 #include "timers.h"
 #include "tools.h"
+#include "tracers.h"
 
 /* Global variables. */
 int cell_next_tag = 0;
@@ -96,7 +98,15 @@ int cell_getsize(struct cell *c) {
  */
 int cell_link_parts(struct cell *c, struct part *parts) {
 
-  c->parts = parts;
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Linking foreign particles in a local cell!");
+
+  if (c->hydro.parts != NULL)
+    error("Linking parts into a cell that was already linked");
+#endif
+
+  c->hydro.parts = parts;
 
   /* Fill the progeny recursively, depth-first. */
   if (c->split) {
@@ -108,7 +118,7 @@ int cell_link_parts(struct cell *c, struct part *parts) {
   }
 
   /* Return the total number of linked particles. */
-  return c->count;
+  return c->hydro.count;
 }
 
 /**
@@ -121,7 +131,15 @@ int cell_link_parts(struct cell *c, struct part *parts) {
  */
 int cell_link_gparts(struct cell *c, struct gpart *gparts) {
 
-  c->gparts = gparts;
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Linking foreign particles in a local cell!");
+
+  if (c->grav.parts != NULL)
+    error("Linking gparts into a cell that was already linked");
+#endif
+
+  c->grav.parts = gparts;
 
   /* Fill the progeny recursively, depth-first. */
   if (c->split) {
@@ -133,7 +151,7 @@ int cell_link_gparts(struct cell *c, struct gpart *gparts) {
   }
 
   /* Return the total number of linked particles. */
-  return c->gcount;
+  return c->grav.count;
 }
 
 /**
@@ -146,7 +164,15 @@ int cell_link_gparts(struct cell *c, struct gpart *gparts) {
  */
 int cell_link_sparts(struct cell *c, struct spart *sparts) {
 
-  c->sparts = sparts;
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Linking foreign particles in a local cell!");
+
+  if (c->stars.parts != NULL)
+    error("Linking sparts into a cell that was already linked");
+#endif
+
+  c->stars.parts = sparts;
 
   /* Fill the progeny recursively, depth-first. */
   if (c->split) {
@@ -158,7 +184,183 @@ int cell_link_sparts(struct cell *c, struct spart *sparts) {
   }
 
   /* Return the total number of linked particles. */
-  return c->scount;
+  return c->stars.count;
+}
+
+/**
+ * @brief Recurse down foreign cells until reaching one with hydro
+ * tasks; then trigger the linking of the #part array from that
+ * level.
+ *
+ * @param c The #cell.
+ * @param parts The #part array.
+ *
+ * @return The number of particles linked.
+ */
+int cell_link_foreign_parts(struct cell *c, struct part *parts) {
+
+#ifdef WITH_MPI
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Linking foreign particles in a local cell!");
+#endif
+
+  /* Do we have a hydro task at this level? */
+  if (c->mpi.hydro.recv_xv != NULL) {
+
+    /* Recursively attach the parts */
+    const int counts = cell_link_parts(c, parts);
+#ifdef SWIFT_DEBUG_CHECKS
+    if (counts != c->hydro.count)
+      error("Something is wrong with the foreign counts");
+#endif
+    return counts;
+  }
+
+  /* Go deeper to find the level where the tasks are */
+  if (c->split) {
+    int count = 0;
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL) {
+        count += cell_link_foreign_parts(c->progeny[k], &parts[count]);
+      }
+    }
+    return count;
+  } else {
+    return 0;
+  }
+
+#else
+  error("Calling linking of foreign particles in non-MPI mode.");
+#endif
+}
+
+/**
+ * @brief Recurse down foreign cells until reaching one with gravity
+ * tasks; then trigger the linking of the #gpart array from that
+ * level.
+ *
+ * @param c The #cell.
+ * @param gparts The #gpart array.
+ *
+ * @return The number of particles linked.
+ */
+int cell_link_foreign_gparts(struct cell *c, struct gpart *gparts) {
+
+#ifdef WITH_MPI
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Linking foreign particles in a local cell!");
+#endif
+
+  /* Do we have a gravity task at this level? */
+  if (c->mpi.grav.recv != NULL) {
+
+    /* Recursively attach the gparts */
+    const int counts = cell_link_gparts(c, gparts);
+#ifdef SWIFT_DEBUG_CHECKS
+    if (counts != c->grav.count)
+      error("Something is wrong with the foreign counts");
+#endif
+    return counts;
+  }
+
+  /* Go deeper to find the level where the tasks are */
+  if (c->split) {
+    int count = 0;
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL) {
+        count += cell_link_foreign_gparts(c->progeny[k], &gparts[count]);
+      }
+    }
+    return count;
+  } else {
+    return 0;
+  }
+
+#else
+  error("Calling linking of foreign particles in non-MPI mode.");
+#endif
+}
+
+/**
+ * @brief Recursively count the number of #part in foreign cells that
+ * are in cells with hydro-related tasks.
+ *
+ * @param c The #cell.
+ *
+ * @return The number of particles linked.
+ */
+int cell_count_parts_for_tasks(const struct cell *c) {
+
+#ifdef WITH_MPI
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Counting foreign particles in a local cell!");
+#endif
+
+  /* Do we have a hydro task at this level? */
+  if (c->mpi.hydro.recv_xv != NULL) {
+    return c->hydro.count;
+  }
+
+  if (c->split) {
+    int count = 0;
+    for (int k = 0; k < 8; ++k) {
+      if (c->progeny[k] != NULL) {
+        count += cell_count_parts_for_tasks(c->progeny[k]);
+      }
+    }
+    return count;
+  } else {
+    return 0;
+  }
+
+#else
+  error("Calling counting of foreign particles in non-MPI mode.");
+#endif
+}
+
+/**
+ * @brief Recursively count the number of #gpart in foreign cells that
+ * are in cells with gravity-related tasks.
+ *
+ * @param c The #cell.
+ *
+ * @return The number of particles linked.
+ */
+int cell_count_gparts_for_tasks(const struct cell *c) {
+
+#ifdef WITH_MPI
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == engine_rank)
+    error("Counting foreign particles in a local cell!");
+#endif
+
+  /* Do we have a gravity task at this level? */
+  if (c->mpi.grav.recv != NULL) {
+    return c->grav.count;
+  }
+
+  if (c->split) {
+    int count = 0;
+    for (int k = 0; k < 8; ++k) {
+      if (c->progeny[k] != NULL) {
+        count += cell_count_gparts_for_tasks(c->progeny[k]);
+      }
+    }
+    return count;
+  } else {
+    return 0;
+  }
+
+#else
+  error("Calling counting of foreign particles in non-MPI mode.");
+#endif
 }
 
 /**
@@ -167,25 +369,48 @@ int cell_link_sparts(struct cell *c, struct spart *sparts) {
  * @param c The #cell.
  * @param pc Pointer to an array of packed cells in which the
  *      cells will be packed.
+ * @param with_gravity Are we running with gravity and hence need
+ *      to exchange multipoles?
  *
  * @return The number of packed cells.
  */
-int cell_pack(struct cell *restrict c, struct pcell *restrict pc) {
+int cell_pack(struct cell *restrict c, struct pcell *restrict pc,
+              const int with_gravity) {
 
 #ifdef WITH_MPI
 
   /* Start by packing the data of the current cell. */
-  pc->h_max = c->h_max;
-  pc->ti_hydro_end_min = c->ti_hydro_end_min;
-  pc->ti_hydro_end_max = c->ti_hydro_end_max;
-  pc->ti_gravity_end_min = c->ti_gravity_end_min;
-  pc->ti_gravity_end_max = c->ti_gravity_end_max;
-  pc->ti_old_part = c->ti_old_part;
-  pc->ti_old_gpart = c->ti_old_gpart;
-  pc->ti_old_multipole = c->ti_old_multipole;
-  pc->count = c->count;
-  pc->gcount = c->gcount;
-  pc->scount = c->scount;
+  pc->hydro.h_max = c->hydro.h_max;
+  pc->stars.h_max = c->stars.h_max;
+  pc->hydro.ti_end_min = c->hydro.ti_end_min;
+  pc->hydro.ti_end_max = c->hydro.ti_end_max;
+  pc->grav.ti_end_min = c->grav.ti_end_min;
+  pc->grav.ti_end_max = c->grav.ti_end_max;
+  pc->stars.ti_end_min = c->stars.ti_end_min;
+  pc->stars.ti_end_max = c->stars.ti_end_max;
+  pc->hydro.ti_old_part = c->hydro.ti_old_part;
+  pc->grav.ti_old_part = c->grav.ti_old_part;
+  pc->grav.ti_old_multipole = c->grav.ti_old_multipole;
+  pc->stars.ti_old_part = c->stars.ti_old_part;
+  pc->hydro.count = c->hydro.count;
+  pc->grav.count = c->grav.count;
+  pc->stars.count = c->stars.count;
+  pc->maxdepth = c->maxdepth;
+
+  /* Copy the Multipole related information */
+  if (with_gravity) {
+    const struct gravity_tensors *mp = c->grav.multipole;
+
+    pc->grav.m_pole = mp->m_pole;
+    pc->grav.CoM[0] = mp->CoM[0];
+    pc->grav.CoM[1] = mp->CoM[1];
+    pc->grav.CoM[2] = mp->CoM[2];
+    pc->grav.CoM_rebuild[0] = mp->CoM_rebuild[0];
+    pc->grav.CoM_rebuild[1] = mp->CoM_rebuild[1];
+    pc->grav.CoM_rebuild[2] = mp->CoM_rebuild[2];
+    pc->grav.r_max = mp->r_max;
+    pc->grav.r_max_rebuild = mp->r_max_rebuild;
+  }
 
 #ifdef SWIFT_DEBUG_CHECKS
   pc->cellID = c->cellID;
@@ -196,13 +421,13 @@ int cell_pack(struct cell *restrict c, struct pcell *restrict pc) {
   for (int k = 0; k < 8; k++)
     if (c->progeny[k] != NULL) {
       pc->progeny[k] = count;
-      count += cell_pack(c->progeny[k], &pc[count]);
+      count += cell_pack(c->progeny[k], &pc[count], with_gravity);
     } else {
       pc->progeny[k] = -1;
     }
 
   /* Return the number of packed cells used. */
-  c->pcell_size = count;
+  c->mpi.pcell_size = count;
   return count;
 
 #else
@@ -224,7 +449,7 @@ int cell_pack_tags(const struct cell *c, int *tags) {
 #ifdef WITH_MPI
 
   /* Start by packing the data of the current cell. */
-  tags[0] = c->tag;
+  tags[0] = c->mpi.tag;
 
   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
@@ -233,7 +458,7 @@ int cell_pack_tags(const struct cell *c, int *tags) {
       count += cell_pack_tags(c->progeny[k], &tags[count]);
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->pcell_size != count) error("Inconsistent tag and pcell count!");
+  if (c->mpi.pcell_size != count) error("Inconsistent tag and pcell count!");
 #endif  // SWIFT_DEBUG_CHECKS
 
   /* Return the number of packed tags used. */
@@ -251,41 +476,66 @@ int cell_pack_tags(const struct cell *c, int *tags) {
  * @param pc An array of packed #pcell.
  * @param c The #cell in which to unpack the #pcell.
  * @param s The #space in which the cells are created.
+ * @param with_gravity Are we running with gravity and hence need
+ *      to exchange multipoles?
  *
  * @return The number of cells created.
  */
 int cell_unpack(struct pcell *restrict pc, struct cell *restrict c,
-                struct space *restrict s) {
+                struct space *restrict s, const int with_gravity) {
 
 #ifdef WITH_MPI
 
   /* Unpack the current pcell. */
-  c->h_max = pc->h_max;
-  c->ti_hydro_end_min = pc->ti_hydro_end_min;
-  c->ti_hydro_end_max = pc->ti_hydro_end_max;
-  c->ti_gravity_end_min = pc->ti_gravity_end_min;
-  c->ti_gravity_end_max = pc->ti_gravity_end_max;
-  c->ti_old_part = pc->ti_old_part;
-  c->ti_old_gpart = pc->ti_old_gpart;
-  c->ti_old_multipole = pc->ti_old_multipole;
-  c->count = pc->count;
-  c->gcount = pc->gcount;
-  c->scount = pc->scount;
+  c->hydro.h_max = pc->hydro.h_max;
+  c->stars.h_max = pc->stars.h_max;
+  c->hydro.ti_end_min = pc->hydro.ti_end_min;
+  c->hydro.ti_end_max = pc->hydro.ti_end_max;
+  c->grav.ti_end_min = pc->grav.ti_end_min;
+  c->grav.ti_end_max = pc->grav.ti_end_max;
+  c->stars.ti_end_min = pc->stars.ti_end_min;
+  c->stars.ti_end_max = pc->stars.ti_end_max;
+  c->hydro.ti_old_part = pc->hydro.ti_old_part;
+  c->grav.ti_old_part = pc->grav.ti_old_part;
+  c->grav.ti_old_multipole = pc->grav.ti_old_multipole;
+  c->stars.ti_old_part = pc->stars.ti_old_part;
+  c->hydro.count = pc->hydro.count;
+  c->grav.count = pc->grav.count;
+  c->stars.count = pc->stars.count;
+  c->maxdepth = pc->maxdepth;
+
 #ifdef SWIFT_DEBUG_CHECKS
   c->cellID = pc->cellID;
 #endif
 
+  /* Copy the Multipole related information */
+  if (with_gravity) {
+
+    struct gravity_tensors *mp = c->grav.multipole;
+
+    mp->m_pole = pc->grav.m_pole;
+    mp->CoM[0] = pc->grav.CoM[0];
+    mp->CoM[1] = pc->grav.CoM[1];
+    mp->CoM[2] = pc->grav.CoM[2];
+    mp->CoM_rebuild[0] = pc->grav.CoM_rebuild[0];
+    mp->CoM_rebuild[1] = pc->grav.CoM_rebuild[1];
+    mp->CoM_rebuild[2] = pc->grav.CoM_rebuild[2];
+    mp->r_max = pc->grav.r_max;
+    mp->r_max_rebuild = pc->grav.r_max_rebuild;
+  }
+
   /* Number of new cells created. */
   int count = 1;
 
   /* Fill the progeny recursively, depth-first. */
+  c->split = 0;
   for (int k = 0; k < 8; k++)
     if (pc->progeny[k] >= 0) {
       struct cell *temp;
       space_getcells(s, 1, &temp);
-      temp->count = 0;
-      temp->gcount = 0;
-      temp->scount = 0;
+      temp->hydro.count = 0;
+      temp->grav.count = 0;
+      temp->stars.count = 0;
       temp->loc[0] = c->loc[0];
       temp->loc[1] = c->loc[1];
       temp->loc[2] = c->loc[2];
@@ -298,17 +548,19 @@ int cell_unpack(struct pcell *restrict pc, struct cell *restrict c,
       if (k & 1) temp->loc[2] += temp->width[2];
       temp->depth = c->depth + 1;
       temp->split = 0;
-      temp->dx_max_part = 0.f;
-      temp->dx_max_sort = 0.f;
+      temp->hydro.dx_max_part = 0.f;
+      temp->hydro.dx_max_sort = 0.f;
+      temp->stars.dx_max_part = 0.f;
+      temp->stars.dx_max_sort = 0.f;
       temp->nodeID = c->nodeID;
       temp->parent = c;
       c->progeny[k] = temp;
       c->split = 1;
-      count += cell_unpack(&pc[pc->progeny[k]], temp, s);
+      count += cell_unpack(&pc[pc->progeny[k]], temp, s, with_gravity);
     }
 
   /* Return the total number of unpacked cells. */
-  c->pcell_size = count;
+  c->mpi.pcell_size = count;
   return count;
 
 #else
@@ -330,7 +582,7 @@ int cell_unpack_tags(const int *tags, struct cell *restrict c) {
 #ifdef WITH_MPI
 
   /* Unpack the current pcell. */
-  c->tag = tags[0];
+  c->mpi.tag = tags[0];
 
   /* Number of new cells created. */
   int count = 1;
@@ -342,7 +594,7 @@ int cell_unpack_tags(const int *tags, struct cell *restrict c) {
     }
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->pcell_size != count) error("Inconsistent tag and pcell count!");
+  if (c->mpi.pcell_size != count) error("Inconsistent tag and pcell count!");
 #endif  // SWIFT_DEBUG_CHECKS
 
   /* Return the total number of unpacked tags. */
@@ -368,11 +620,14 @@ int cell_pack_end_step(struct cell *restrict c,
 #ifdef WITH_MPI
 
   /* Pack this cell's data. */
-  pcells[0].ti_hydro_end_min = c->ti_hydro_end_min;
-  pcells[0].ti_hydro_end_max = c->ti_hydro_end_max;
-  pcells[0].ti_gravity_end_min = c->ti_gravity_end_min;
-  pcells[0].ti_gravity_end_max = c->ti_gravity_end_max;
-  pcells[0].dx_max_part = c->dx_max_part;
+  pcells[0].hydro.ti_end_min = c->hydro.ti_end_min;
+  pcells[0].hydro.ti_end_max = c->hydro.ti_end_max;
+  pcells[0].grav.ti_end_min = c->grav.ti_end_min;
+  pcells[0].grav.ti_end_max = c->grav.ti_end_max;
+  pcells[0].stars.ti_end_min = c->stars.ti_end_min;
+  pcells[0].stars.ti_end_max = c->stars.ti_end_max;
+  pcells[0].hydro.dx_max_part = c->hydro.dx_max_part;
+  pcells[0].stars.dx_max_part = c->stars.dx_max_part;
 
   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
@@ -404,11 +659,14 @@ int cell_unpack_end_step(struct cell *restrict c,
 #ifdef WITH_MPI
 
   /* Unpack this cell's data. */
-  c->ti_hydro_end_min = pcells[0].ti_hydro_end_min;
-  c->ti_hydro_end_max = pcells[0].ti_hydro_end_max;
-  c->ti_gravity_end_min = pcells[0].ti_gravity_end_min;
-  c->ti_gravity_end_max = pcells[0].ti_gravity_end_max;
-  c->dx_max_part = pcells[0].dx_max_part;
+  c->hydro.ti_end_min = pcells[0].hydro.ti_end_min;
+  c->hydro.ti_end_max = pcells[0].hydro.ti_end_max;
+  c->grav.ti_end_min = pcells[0].grav.ti_end_min;
+  c->grav.ti_end_max = pcells[0].grav.ti_end_max;
+  c->stars.ti_end_min = pcells[0].stars.ti_end_min;
+  c->stars.ti_end_max = pcells[0].stars.ti_end_max;
+  c->hydro.dx_max_part = pcells[0].hydro.dx_max_part;
+  c->stars.dx_max_part = pcells[0].stars.dx_max_part;
 
   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
@@ -441,7 +699,7 @@ int cell_pack_multipoles(struct cell *restrict c,
 #ifdef WITH_MPI
 
   /* Pack this cell's data. */
-  pcells[0] = *c->multipole;
+  pcells[0] = *c->grav.multipole;
 
   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
@@ -473,7 +731,7 @@ int cell_unpack_multipoles(struct cell *restrict c,
 #ifdef WITH_MPI
 
   /* Unpack this cell's data. */
-  *c->multipole = pcells[0];
+  *c->grav.multipole = pcells[0];
 
   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
@@ -502,16 +760,16 @@ int cell_locktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to lock this cell. */
-  if (c->hold || lock_trylock(&c->lock) != 0) {
+  if (c->hydro.hold || lock_trylock(&c->hydro.lock) != 0) {
     TIMER_TOC(timer_locktree);
     return 1;
   }
 
   /* Did somebody hold this cell in the meantime? */
-  if (c->hold) {
+  if (c->hydro.hold) {
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->lock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->hydro.lock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -523,13 +781,13 @@ int cell_locktree(struct cell *c) {
   for (finger = c->parent; finger != NULL; finger = finger->parent) {
 
     /* Lock this cell. */
-    if (lock_trylock(&finger->lock) != 0) break;
+    if (lock_trylock(&finger->hydro.lock) != 0) break;
 
     /* Increment the hold. */
-    atomic_inc(&finger->hold);
+    atomic_inc(&finger->hydro.hold);
 
     /* Unlock the cell. */
-    if (lock_unlock(&finger->lock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&finger->hydro.lock) != 0) error("Failed to unlock cell.");
   }
 
   /* If we reached the top of the tree, we're done. */
@@ -544,10 +802,10 @@ int cell_locktree(struct cell *c) {
     /* Undo the holds up to finger. */
     for (struct cell *finger2 = c->parent; finger2 != finger;
          finger2 = finger2->parent)
-      atomic_dec(&finger2->hold);
+      atomic_dec(&finger2->hydro.hold);
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->lock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->hydro.lock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -566,16 +824,16 @@ int cell_glocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to lock this cell. */
-  if (c->ghold || lock_trylock(&c->glock) != 0) {
+  if (c->grav.phold || lock_trylock(&c->grav.plock) != 0) {
     TIMER_TOC(timer_locktree);
     return 1;
   }
 
   /* Did somebody hold this cell in the meantime? */
-  if (c->ghold) {
+  if (c->grav.phold) {
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->grav.plock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -587,13 +845,13 @@ int cell_glocktree(struct cell *c) {
   for (finger = c->parent; finger != NULL; finger = finger->parent) {
 
     /* Lock this cell. */
-    if (lock_trylock(&finger->glock) != 0) break;
+    if (lock_trylock(&finger->grav.plock) != 0) break;
 
     /* Increment the hold. */
-    atomic_inc(&finger->ghold);
+    atomic_inc(&finger->grav.phold);
 
     /* Unlock the cell. */
-    if (lock_unlock(&finger->glock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&finger->grav.plock) != 0) error("Failed to unlock cell.");
   }
 
   /* If we reached the top of the tree, we're done. */
@@ -608,10 +866,10 @@ int cell_glocktree(struct cell *c) {
     /* Undo the holds up to finger. */
     for (struct cell *finger2 = c->parent; finger2 != finger;
          finger2 = finger2->parent)
-      atomic_dec(&finger2->ghold);
+      atomic_dec(&finger2->grav.phold);
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->grav.plock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -630,16 +888,16 @@ int cell_mlocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to lock this cell. */
-  if (c->mhold || lock_trylock(&c->mlock) != 0) {
+  if (c->grav.mhold || lock_trylock(&c->grav.mlock) != 0) {
     TIMER_TOC(timer_locktree);
     return 1;
   }
 
   /* Did somebody hold this cell in the meantime? */
-  if (c->mhold) {
+  if (c->grav.mhold) {
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->mlock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->grav.mlock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -651,13 +909,13 @@ int cell_mlocktree(struct cell *c) {
   for (finger = c->parent; finger != NULL; finger = finger->parent) {
 
     /* Lock this cell. */
-    if (lock_trylock(&finger->mlock) != 0) break;
+    if (lock_trylock(&finger->grav.mlock) != 0) break;
 
     /* Increment the hold. */
-    atomic_inc(&finger->mhold);
+    atomic_inc(&finger->grav.mhold);
 
     /* Unlock the cell. */
-    if (lock_unlock(&finger->mlock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&finger->grav.mlock) != 0) error("Failed to unlock cell.");
   }
 
   /* If we reached the top of the tree, we're done. */
@@ -672,10 +930,10 @@ int cell_mlocktree(struct cell *c) {
     /* Undo the holds up to finger. */
     for (struct cell *finger2 = c->parent; finger2 != finger;
          finger2 = finger2->parent)
-      atomic_dec(&finger2->mhold);
+      atomic_dec(&finger2->grav.mhold);
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->mlock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->grav.mlock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -694,16 +952,16 @@ int cell_slocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to lock this cell. */
-  if (c->shold || lock_trylock(&c->slock) != 0) {
+  if (c->stars.hold || lock_trylock(&c->stars.lock) != 0) {
     TIMER_TOC(timer_locktree);
     return 1;
   }
 
   /* Did somebody hold this cell in the meantime? */
-  if (c->shold) {
+  if (c->stars.hold) {
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->slock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->stars.lock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -715,13 +973,13 @@ int cell_slocktree(struct cell *c) {
   for (finger = c->parent; finger != NULL; finger = finger->parent) {
 
     /* Lock this cell. */
-    if (lock_trylock(&finger->slock) != 0) break;
+    if (lock_trylock(&finger->stars.lock) != 0) break;
 
     /* Increment the hold. */
-    atomic_inc(&finger->shold);
+    atomic_inc(&finger->stars.hold);
 
     /* Unlock the cell. */
-    if (lock_unlock(&finger->slock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&finger->stars.lock) != 0) error("Failed to unlock cell.");
   }
 
   /* If we reached the top of the tree, we're done. */
@@ -736,10 +994,10 @@ int cell_slocktree(struct cell *c) {
     /* Undo the holds up to finger. */
     for (struct cell *finger2 = c->parent; finger2 != finger;
          finger2 = finger2->parent)
-      atomic_dec(&finger2->shold);
+      atomic_dec(&finger2->stars.hold);
 
     /* Unlock this cell. */
-    if (lock_unlock(&c->slock) != 0) error("Failed to unlock cell.");
+    if (lock_unlock(&c->stars.lock) != 0) error("Failed to unlock cell.");
 
     /* Admit defeat. */
     TIMER_TOC(timer_locktree);
@@ -757,11 +1015,11 @@ void cell_unlocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to unlock this cell. */
-  if (lock_unlock(&c->lock) != 0) error("Failed to unlock cell.");
+  if (lock_unlock(&c->hydro.lock) != 0) error("Failed to unlock cell.");
 
   /* Climb up the tree and unhold the parents. */
   for (struct cell *finger = c->parent; finger != NULL; finger = finger->parent)
-    atomic_dec(&finger->hold);
+    atomic_dec(&finger->hydro.hold);
 
   TIMER_TOC(timer_locktree);
 }
@@ -776,11 +1034,11 @@ void cell_gunlocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to unlock this cell. */
-  if (lock_unlock(&c->glock) != 0) error("Failed to unlock cell.");
+  if (lock_unlock(&c->grav.plock) != 0) error("Failed to unlock cell.");
 
   /* Climb up the tree and unhold the parents. */
   for (struct cell *finger = c->parent; finger != NULL; finger = finger->parent)
-    atomic_dec(&finger->ghold);
+    atomic_dec(&finger->grav.phold);
 
   TIMER_TOC(timer_locktree);
 }
@@ -795,11 +1053,11 @@ void cell_munlocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to unlock this cell. */
-  if (lock_unlock(&c->mlock) != 0) error("Failed to unlock cell.");
+  if (lock_unlock(&c->grav.mlock) != 0) error("Failed to unlock cell.");
 
   /* Climb up the tree and unhold the parents. */
   for (struct cell *finger = c->parent; finger != NULL; finger = finger->parent)
-    atomic_dec(&finger->mhold);
+    atomic_dec(&finger->grav.mhold);
 
   TIMER_TOC(timer_locktree);
 }
@@ -814,11 +1072,11 @@ void cell_sunlocktree(struct cell *c) {
   TIMER_TIC
 
   /* First of all, try to unlock this cell. */
-  if (lock_unlock(&c->slock) != 0) error("Failed to unlock cell.");
+  if (lock_unlock(&c->stars.lock) != 0) error("Failed to unlock cell.");
 
   /* Climb up the tree and unhold the parents. */
   for (struct cell *finger = c->parent; finger != NULL; finger = finger->parent)
-    atomic_dec(&finger->shold);
+    atomic_dec(&finger->stars.hold);
 
   TIMER_TOC(timer_locktree);
 }
@@ -828,25 +1086,26 @@ void cell_sunlocktree(struct cell *c) {
  *
  * @param c The #cell array to be sorted.
  * @param parts_offset Offset of the cell parts array relative to the
- *        space's parts array, i.e. c->parts - s->parts.
+ *        space's parts array, i.e. c->hydro.parts - s->parts.
  * @param sparts_offset Offset of the cell sparts array relative to the
- *        space's sparts array, i.e. c->sparts - s->sparts.
- * @param buff A buffer with at least max(c->count, c->gcount) entries,
- *        used for sorting indices.
- * @param sbuff A buffer with at least max(c->scount, c->gcount) entries,
- *        used for sorting indices for the sparts.
- * @param gbuff A buffer with at least max(c->count, c->gcount) entries,
- *        used for sorting indices for the gparts.
+ *        space's sparts array, i.e. c->stars.parts - s->stars.parts.
+ * @param buff A buffer with at least max(c->hydro.count, c->grav.count)
+ * entries, used for sorting indices.
+ * @param sbuff A buffer with at least max(c->stars.count, c->grav.count)
+ * entries, used for sorting indices for the sparts.
+ * @param gbuff A buffer with at least max(c->hydro.count, c->grav.count)
+ * entries, used for sorting indices for the gparts.
  */
 void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
                 struct cell_buff *buff, struct cell_buff *sbuff,
                 struct cell_buff *gbuff) {
 
-  const int count = c->count, gcount = c->gcount, scount = c->scount;
-  struct part *parts = c->parts;
-  struct xpart *xparts = c->xparts;
-  struct gpart *gparts = c->gparts;
-  struct spart *sparts = c->sparts;
+  const int count = c->hydro.count, gcount = c->grav.count,
+            scount = c->stars.count;
+  struct part *parts = c->hydro.parts;
+  struct xpart *xparts = c->hydro.xparts;
+  struct gpart *gparts = c->grav.parts;
+  struct spart *sparts = c->stars.parts;
   const double pivot[3] = {c->loc[0] + c->width[0] / 2,
                            c->loc[1] + c->width[1] / 2,
                            c->loc[2] + c->width[2] / 2};
@@ -921,9 +1180,10 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
 
   /* Store the counts and offsets. */
   for (int k = 0; k < 8; k++) {
-    c->progeny[k]->count = bucket_count[k];
-    c->progeny[k]->parts = &c->parts[bucket_offset[k]];
-    c->progeny[k]->xparts = &c->xparts[bucket_offset[k]];
+    c->progeny[k]->hydro.count = bucket_count[k];
+    c->progeny[k]->hydro.count_total = c->progeny[k]->hydro.count;
+    c->progeny[k]->hydro.parts = &c->hydro.parts[bucket_offset[k]];
+    c->progeny[k]->hydro.xparts = &c->hydro.xparts[bucket_offset[k]];
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -937,54 +1197,55 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
 
   /* Verify that _all_ the parts have been assigned to a cell. */
   for (int k = 1; k < 8; k++)
-    if (&c->progeny[k - 1]->parts[c->progeny[k - 1]->count] !=
-        c->progeny[k]->parts)
+    if (&c->progeny[k - 1]->hydro.parts[c->progeny[k - 1]->hydro.count] !=
+        c->progeny[k]->hydro.parts)
       error("Particle sorting failed (internal consistency).");
-  if (c->progeny[0]->parts != c->parts)
+  if (c->progeny[0]->hydro.parts != c->hydro.parts)
     error("Particle sorting failed (left edge).");
-  if (&c->progeny[7]->parts[c->progeny[7]->count] != &c->parts[count])
+  if (&c->progeny[7]->hydro.parts[c->progeny[7]->hydro.count] !=
+      &c->hydro.parts[count])
     error("Particle sorting failed (right edge).");
 
   /* Verify a few sub-cells. */
-  for (int k = 0; k < c->progeny[0]->count; k++)
-    if (c->progeny[0]->parts[k].x[0] >= pivot[0] ||
-        c->progeny[0]->parts[k].x[1] >= pivot[1] ||
-        c->progeny[0]->parts[k].x[2] >= pivot[2])
+  for (int k = 0; k < c->progeny[0]->hydro.count; k++)
+    if (c->progeny[0]->hydro.parts[k].x[0] >= pivot[0] ||
+        c->progeny[0]->hydro.parts[k].x[1] >= pivot[1] ||
+        c->progeny[0]->hydro.parts[k].x[2] >= pivot[2])
       error("Sorting failed (progeny=0).");
-  for (int k = 0; k < c->progeny[1]->count; k++)
-    if (c->progeny[1]->parts[k].x[0] >= pivot[0] ||
-        c->progeny[1]->parts[k].x[1] >= pivot[1] ||
-        c->progeny[1]->parts[k].x[2] < pivot[2])
+  for (int k = 0; k < c->progeny[1]->hydro.count; k++)
+    if (c->progeny[1]->hydro.parts[k].x[0] >= pivot[0] ||
+        c->progeny[1]->hydro.parts[k].x[1] >= pivot[1] ||
+        c->progeny[1]->hydro.parts[k].x[2] < pivot[2])
       error("Sorting failed (progeny=1).");
-  for (int k = 0; k < c->progeny[2]->count; k++)
-    if (c->progeny[2]->parts[k].x[0] >= pivot[0] ||
-        c->progeny[2]->parts[k].x[1] < pivot[1] ||
-        c->progeny[2]->parts[k].x[2] >= pivot[2])
+  for (int k = 0; k < c->progeny[2]->hydro.count; k++)
+    if (c->progeny[2]->hydro.parts[k].x[0] >= pivot[0] ||
+        c->progeny[2]->hydro.parts[k].x[1] < pivot[1] ||
+        c->progeny[2]->hydro.parts[k].x[2] >= pivot[2])
       error("Sorting failed (progeny=2).");
-  for (int k = 0; k < c->progeny[3]->count; k++)
-    if (c->progeny[3]->parts[k].x[0] >= pivot[0] ||
-        c->progeny[3]->parts[k].x[1] < pivot[1] ||
-        c->progeny[3]->parts[k].x[2] < pivot[2])
+  for (int k = 0; k < c->progeny[3]->hydro.count; k++)
+    if (c->progeny[3]->hydro.parts[k].x[0] >= pivot[0] ||
+        c->progeny[3]->hydro.parts[k].x[1] < pivot[1] ||
+        c->progeny[3]->hydro.parts[k].x[2] < pivot[2])
       error("Sorting failed (progeny=3).");
-  for (int k = 0; k < c->progeny[4]->count; k++)
-    if (c->progeny[4]->parts[k].x[0] < pivot[0] ||
-        c->progeny[4]->parts[k].x[1] >= pivot[1] ||
-        c->progeny[4]->parts[k].x[2] >= pivot[2])
+  for (int k = 0; k < c->progeny[4]->hydro.count; k++)
+    if (c->progeny[4]->hydro.parts[k].x[0] < pivot[0] ||
+        c->progeny[4]->hydro.parts[k].x[1] >= pivot[1] ||
+        c->progeny[4]->hydro.parts[k].x[2] >= pivot[2])
       error("Sorting failed (progeny=4).");
-  for (int k = 0; k < c->progeny[5]->count; k++)
-    if (c->progeny[5]->parts[k].x[0] < pivot[0] ||
-        c->progeny[5]->parts[k].x[1] >= pivot[1] ||
-        c->progeny[5]->parts[k].x[2] < pivot[2])
+  for (int k = 0; k < c->progeny[5]->hydro.count; k++)
+    if (c->progeny[5]->hydro.parts[k].x[0] < pivot[0] ||
+        c->progeny[5]->hydro.parts[k].x[1] >= pivot[1] ||
+        c->progeny[5]->hydro.parts[k].x[2] < pivot[2])
       error("Sorting failed (progeny=5).");
-  for (int k = 0; k < c->progeny[6]->count; k++)
-    if (c->progeny[6]->parts[k].x[0] < pivot[0] ||
-        c->progeny[6]->parts[k].x[1] < pivot[1] ||
-        c->progeny[6]->parts[k].x[2] >= pivot[2])
+  for (int k = 0; k < c->progeny[6]->hydro.count; k++)
+    if (c->progeny[6]->hydro.parts[k].x[0] < pivot[0] ||
+        c->progeny[6]->hydro.parts[k].x[1] < pivot[1] ||
+        c->progeny[6]->hydro.parts[k].x[2] >= pivot[2])
       error("Sorting failed (progeny=6).");
-  for (int k = 0; k < c->progeny[7]->count; k++)
-    if (c->progeny[7]->parts[k].x[0] < pivot[0] ||
-        c->progeny[7]->parts[k].x[1] < pivot[1] ||
-        c->progeny[7]->parts[k].x[2] < pivot[2])
+  for (int k = 0; k < c->progeny[7]->hydro.count; k++)
+    if (c->progeny[7]->hydro.parts[k].x[0] < pivot[0] ||
+        c->progeny[7]->hydro.parts[k].x[1] < pivot[1] ||
+        c->progeny[7]->hydro.parts[k].x[2] < pivot[2])
       error("Sorting failed (progeny=7).");
 #endif
 
@@ -1037,8 +1298,9 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
 
   /* Store the counts and offsets. */
   for (int k = 0; k < 8; k++) {
-    c->progeny[k]->scount = bucket_count[k];
-    c->progeny[k]->sparts = &c->sparts[bucket_offset[k]];
+    c->progeny[k]->stars.count = bucket_count[k];
+    c->progeny[k]->stars.count_total = c->progeny[k]->stars.count;
+    c->progeny[k]->stars.parts = &c->stars.parts[bucket_offset[k]];
   }
 
   /* Finally, do the same song and dance for the gparts. */
@@ -1078,7 +1340,7 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
           if (gparts[j].type == swift_type_gas) {
             parts[-gparts[j].id_or_neg_offset - parts_offset].gpart =
                 &gparts[j];
-          } else if (gparts[j].type == swift_type_star) {
+          } else if (gparts[j].type == swift_type_stars) {
             sparts[-gparts[j].id_or_neg_offset - sparts_offset].gpart =
                 &gparts[j];
           }
@@ -1088,7 +1350,7 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
         gbuff[k] = temp_buff;
         if (gparts[k].type == swift_type_gas) {
           parts[-gparts[k].id_or_neg_offset - parts_offset].gpart = &gparts[k];
-        } else if (gparts[k].type == swift_type_star) {
+        } else if (gparts[k].type == swift_type_stars) {
           sparts[-gparts[k].id_or_neg_offset - sparts_offset].gpart =
               &gparts[k];
         }
@@ -1099,8 +1361,9 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
 
   /* Store the counts and offsets. */
   for (int k = 0; k < 8; k++) {
-    c->progeny[k]->gcount = bucket_count[k];
-    c->progeny[k]->gparts = &c->gparts[bucket_offset[k]];
+    c->progeny[k]->grav.count = bucket_count[k];
+    c->progeny[k]->grav.count_total = c->progeny[k]->grav.count;
+    c->progeny[k]->grav.parts = &c->grav.parts[bucket_offset[k]];
   }
 }
 
@@ -1116,9 +1379,12 @@ void cell_split(struct cell *c, ptrdiff_t parts_offset, ptrdiff_t sparts_offset,
  */
 void cell_sanitize(struct cell *c, int treated) {
 
-  const int count = c->count;
-  struct part *parts = c->parts;
+  const int count = c->hydro.count;
+  const int scount = c->stars.count;
+  struct part *parts = c->hydro.parts;
+  struct spart *sparts = c->stars.parts;
   float h_max = 0.f;
+  float stars_h_max = 0.f;
 
   /* Treat cells will <1000 particles */
   if (count < 1000 && !treated) {
@@ -1131,6 +1397,10 @@ void cell_sanitize(struct cell *c, int treated) {
       if (parts[i].h == 0.f || parts[i].h > upper_h_max)
         parts[i].h = upper_h_max;
     }
+    for (int i = 0; i < scount; ++i) {
+      if (sparts[i].h == 0.f || sparts[i].h > upper_h_max)
+        sparts[i].h = upper_h_max;
+    }
   }
 
   /* Recurse and gather the new h_max values */
@@ -1143,17 +1413,21 @@ void cell_sanitize(struct cell *c, int treated) {
         cell_sanitize(c->progeny[k], (count < 1000));
 
         /* And collect */
-        h_max = max(h_max, c->progeny[k]->h_max);
+        h_max = max(h_max, c->progeny[k]->hydro.h_max);
+        stars_h_max = max(stars_h_max, c->progeny[k]->stars.h_max);
       }
     }
   } else {
 
     /* Get the new value of h_max */
     for (int i = 0; i < count; ++i) h_max = max(h_max, parts[i].h);
+    for (int i = 0; i < scount; ++i)
+      stars_h_max = max(stars_h_max, sparts[i].h);
   }
 
   /* Record the change */
-  c->h_max = h_max;
+  c->hydro.h_max = h_max;
+  c->stars.h_max = stars_h_max;
 }
 
 /**
@@ -1163,10 +1437,14 @@ void cell_sanitize(struct cell *c, int treated) {
  * @param data Unused parameter
  */
 void cell_clean_links(struct cell *c, void *data) {
-  c->density = NULL;
-  c->gradient = NULL;
-  c->force = NULL;
-  c->grav = NULL;
+  c->hydro.density = NULL;
+  c->hydro.gradient = NULL;
+  c->hydro.force = NULL;
+  c->hydro.limiter = NULL;
+  c->grav.grav = NULL;
+  c->grav.mm = NULL;
+  c->stars.density = NULL;
+  c->stars.feedback = NULL;
 }
 
 /**
@@ -1187,21 +1465,25 @@ void cell_check_part_drift_point(struct cell *c, void *data) {
   /* Only check local cells */
   if (c->nodeID != engine_rank) return;
 
-  if (c->ti_old_part != ti_drift)
-    error("Cell in an incorrect time-zone! c->ti_old_part=%lld ti_drift=%lld",
-          c->ti_old_part, ti_drift);
+  /* Only check cells with content */
+  if (c->hydro.count == 0) return;
+
+  if (c->hydro.ti_old_part != ti_drift)
+    error("Cell in an incorrect time-zone! c->hydro.ti_old_part=%lld ti_drift=%lld",
+          c->hydro.ti_old_part, ti_drift);
 
-  for (int i = 0; i < c->count; ++i)
-    if (c->parts[i].ti_drift != ti_drift)
+  for (int i = 0; i < c->hydro.count; ++i)
+    if (c->hydro.parts[i].ti_drift != ti_drift &&
+        c->hydro.parts[i].time_bin != time_bin_inhibited)
       error("part in an incorrect time-zone! p->ti_drift=%lld ti_drift=%lld",
-            c->parts[i].ti_drift, ti_drift);
+            c->hydro.parts[i].ti_drift, ti_drift);
 #else
   error("Calling debugging code without debugging flag activated.");
 #endif
 }
 
 /**
- * @brief Checks that the #gpart and #spart in a cell are at the
+ * @brief Checks that the #gpart in a cell are at the
  * current point in time
  *
  * Calls error() if the cell is not at the current time.
@@ -1218,19 +1500,57 @@ void cell_check_gpart_drift_point(struct cell *c, void *data) {
   /* Only check local cells */
   if (c->nodeID != engine_rank) return;
 
-  if (c->ti_old_gpart != ti_drift)
-    error("Cell in an incorrect time-zone! c->ti_old_gpart=%lld ti_drift=%lld",
-          c->ti_old_gpart, ti_drift);
+  /* Only check cells with content */
+  if (c->grav.count == 0) return;
 
-  for (int i = 0; i < c->gcount; ++i)
-    if (c->gparts[i].ti_drift != ti_drift)
+  if (c->grav.ti_old_part != ti_drift)
+    error(
+        "Cell in an incorrect time-zone! c->grav.ti_old_part=%lld "
+        "ti_drift=%lld",
+        c->grav.ti_old_part, ti_drift);
+
+  for (int i = 0; i < c->grav.count; ++i)
+    if (c->grav.parts[i].ti_drift != ti_drift &&
+        c->grav.parts[i].time_bin != time_bin_inhibited)
       error("g-part in an incorrect time-zone! gp->ti_drift=%lld ti_drift=%lld",
-            c->gparts[i].ti_drift, ti_drift);
+            c->grav.parts[i].ti_drift, ti_drift);
+#else
+  error("Calling debugging code without debugging flag activated.");
+#endif
+}
+
+/**
+ * @brief Checks that the #spart in a cell are at the
+ * current point in time
+ *
+ * Calls error() if the cell is not at the current time.
+ *
+ * @param c Cell to act upon
+ * @param data The current time on the integer time-line
+ */
+void cell_check_spart_drift_point(struct cell *c, void *data) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  const integertime_t ti_drift = *(integertime_t *)data;
+
+  /* Only check local cells */
+  if (c->nodeID != engine_rank) return;
+
+  /* Only check cells with content */
+  if (c->stars.count == 0) return;
+
+  if (c->stars.ti_old_part != ti_drift)
+    error(
+        "Cell in an incorrect time-zone! c->stars.ti_old_part=%lld "
+        "ti_drift=%lld",
+        c->stars.ti_old_part, ti_drift);
 
-  for (int i = 0; i < c->scount; ++i)
-    if (c->sparts[i].ti_drift != ti_drift)
-      error("s-part in an incorrect time-zone! sp->ti_drift=%lld ti_drift=%lld",
-            c->sparts[i].ti_drift, ti_drift);
+  for (int i = 0; i < c->stars.count; ++i)
+    if (c->stars.parts[i].ti_drift != ti_drift &&
+        c->stars.parts[i].time_bin != time_bin_inhibited)
+      error("s-part in an incorrect time-zone! sp->ti_drift=%lld ti_drift=%lld",
+            c->stars.parts[i].ti_drift, ti_drift);
 #else
   error("Calling debugging code without debugging flag activated.");
 #endif
@@ -1250,11 +1570,18 @@ void cell_check_multipole_drift_point(struct cell *c, void *data) {
 
   const integertime_t ti_drift = *(integertime_t *)data;
 
-  if (c->ti_old_multipole != ti_drift)
+  /* Only check local cells */
+  if (c->nodeID != engine_rank) return;
+
+  /* Only check cells with content */
+  if (c->grav.count == 0) return;
+
+  if (c->grav.ti_old_multipole != ti_drift)
     error(
-        "Cell multipole in an incorrect time-zone! c->ti_old_multipole=%lld "
-        "ti_drift=%lld (depth=%d)",
-        c->ti_old_multipole, ti_drift, c->depth);
+        "Cell multipole in an incorrect time-zone! "
+        "c->grav.ti_old_multipole=%lld "
+        "ti_drift=%lld (depth=%d, node=%d)",
+        c->grav.ti_old_multipole, ti_drift, c->depth, c->nodeID);
 
 #else
   error("Calling debugging code without debugging flag activated.");
@@ -1287,7 +1614,7 @@ void cell_reset_task_counters(struct cell *c) {
 void cell_make_multipoles(struct cell *c, integertime_t ti_current) {
 
   /* Reset everything */
-  gravity_reset(c->multipole);
+  gravity_reset(c->grav.multipole);
 
   if (c->split) {
 
@@ -1303,7 +1630,7 @@ void cell_make_multipoles(struct cell *c, integertime_t ti_current) {
 
     for (int k = 0; k < 8; ++k) {
       if (c->progeny[k] != NULL) {
-        const struct gravity_tensors *m = c->progeny[k]->multipole;
+        const struct gravity_tensors *m = c->progeny[k]->grav.multipole;
         CoM[0] += m->CoM[0] * m->m_pole.M_000;
         CoM[1] += m->CoM[1] * m->m_pole.M_000;
         CoM[2] += m->CoM[2] * m->m_pole.M_000;
@@ -1312,9 +1639,9 @@ void cell_make_multipoles(struct cell *c, integertime_t ti_current) {
     }
 
     const double mass_inv = 1. / mass;
-    c->multipole->CoM[0] = CoM[0] * mass_inv;
-    c->multipole->CoM[1] = CoM[1] * mass_inv;
-    c->multipole->CoM[2] = CoM[2] * mass_inv;
+    c->grav.multipole->CoM[0] = CoM[0] * mass_inv;
+    c->grav.multipole->CoM[1] = CoM[1] * mass_inv;
+    c->grav.multipole->CoM[2] = CoM[2] * mass_inv;
 
     /* Now shift progeny multipoles and add them up */
     struct multipole temp;
@@ -1322,64 +1649,112 @@ void cell_make_multipoles(struct cell *c, integertime_t ti_current) {
     for (int k = 0; k < 8; ++k) {
       if (c->progeny[k] != NULL) {
         const struct cell *cp = c->progeny[k];
-        const struct multipole *m = &cp->multipole->m_pole;
+        const struct multipole *m = &cp->grav.multipole->m_pole;
 
         /* Contribution to multipole */
-        gravity_M2M(&temp, m, c->multipole->CoM, cp->multipole->CoM);
-        gravity_multipole_add(&c->multipole->m_pole, &temp);
+        gravity_M2M(&temp, m, c->grav.multipole->CoM, cp->grav.multipole->CoM);
+        gravity_multipole_add(&c->grav.multipole->m_pole, &temp);
 
         /* Upper limit of max CoM<->gpart distance */
-        const double dx = c->multipole->CoM[0] - cp->multipole->CoM[0];
-        const double dy = c->multipole->CoM[1] - cp->multipole->CoM[1];
-        const double dz = c->multipole->CoM[2] - cp->multipole->CoM[2];
+        const double dx =
+            c->grav.multipole->CoM[0] - cp->grav.multipole->CoM[0];
+        const double dy =
+            c->grav.multipole->CoM[1] - cp->grav.multipole->CoM[1];
+        const double dz =
+            c->grav.multipole->CoM[2] - cp->grav.multipole->CoM[2];
         const double r2 = dx * dx + dy * dy + dz * dz;
-        r_max = max(r_max, cp->multipole->r_max + sqrt(r2));
+        r_max = max(r_max, cp->grav.multipole->r_max + sqrt(r2));
       }
     }
     /* Alternative upper limit of max CoM<->gpart distance */
-    const double dx = c->multipole->CoM[0] > c->loc[0] + c->width[0] * 0.5
-                          ? c->multipole->CoM[0] - c->loc[0]
-                          : c->loc[0] + c->width[0] - c->multipole->CoM[0];
-    const double dy = c->multipole->CoM[1] > c->loc[1] + c->width[1] * 0.5
-                          ? c->multipole->CoM[1] - c->loc[1]
-                          : c->loc[1] + c->width[1] - c->multipole->CoM[1];
-    const double dz = c->multipole->CoM[2] > c->loc[2] + c->width[2] * 0.5
-                          ? c->multipole->CoM[2] - c->loc[2]
-                          : c->loc[2] + c->width[2] - c->multipole->CoM[2];
+    const double dx = c->grav.multipole->CoM[0] > c->loc[0] + c->width[0] * 0.5
+                          ? c->grav.multipole->CoM[0] - c->loc[0]
+                          : c->loc[0] + c->width[0] - c->grav.multipole->CoM[0];
+    const double dy = c->grav.multipole->CoM[1] > c->loc[1] + c->width[1] * 0.5
+                          ? c->grav.multipole->CoM[1] - c->loc[1]
+                          : c->loc[1] + c->width[1] - c->grav.multipole->CoM[1];
+    const double dz = c->grav.multipole->CoM[2] > c->loc[2] + c->width[2] * 0.5
+                          ? c->grav.multipole->CoM[2] - c->loc[2]
+                          : c->loc[2] + c->width[2] - c->grav.multipole->CoM[2];
 
     /* Take minimum of both limits */
-    c->multipole->r_max = min(r_max, sqrt(dx * dx + dy * dy + dz * dz));
+    c->grav.multipole->r_max = min(r_max, sqrt(dx * dx + dy * dy + dz * dz));
 
   } else {
 
-    if (c->gcount > 0) {
-      gravity_P2M(c->multipole, c->gparts, c->gcount);
-      const double dx = c->multipole->CoM[0] > c->loc[0] + c->width[0] * 0.5
-                            ? c->multipole->CoM[0] - c->loc[0]
-                            : c->loc[0] + c->width[0] - c->multipole->CoM[0];
-      const double dy = c->multipole->CoM[1] > c->loc[1] + c->width[1] * 0.5
-                            ? c->multipole->CoM[1] - c->loc[1]
-                            : c->loc[1] + c->width[1] - c->multipole->CoM[1];
-      const double dz = c->multipole->CoM[2] > c->loc[2] + c->width[2] * 0.5
-                            ? c->multipole->CoM[2] - c->loc[2]
-                            : c->loc[2] + c->width[2] - c->multipole->CoM[2];
-      c->multipole->r_max = sqrt(dx * dx + dy * dy + dz * dz);
+    if (c->grav.count > 0) {
+      gravity_P2M(c->grav.multipole, c->grav.parts, c->grav.count);
+      const double dx =
+          c->grav.multipole->CoM[0] > c->loc[0] + c->width[0] * 0.5
+              ? c->grav.multipole->CoM[0] - c->loc[0]
+              : c->loc[0] + c->width[0] - c->grav.multipole->CoM[0];
+      const double dy =
+          c->grav.multipole->CoM[1] > c->loc[1] + c->width[1] * 0.5
+              ? c->grav.multipole->CoM[1] - c->loc[1]
+              : c->loc[1] + c->width[1] - c->grav.multipole->CoM[1];
+      const double dz =
+          c->grav.multipole->CoM[2] > c->loc[2] + c->width[2] * 0.5
+              ? c->grav.multipole->CoM[2] - c->loc[2]
+              : c->loc[2] + c->width[2] - c->grav.multipole->CoM[2];
+      c->grav.multipole->r_max = sqrt(dx * dx + dy * dy + dz * dz);
     } else {
-      gravity_multipole_init(&c->multipole->m_pole);
-      c->multipole->CoM[0] = c->loc[0] + c->width[0] * 0.5;
-      c->multipole->CoM[1] = c->loc[1] + c->width[1] * 0.5;
-      c->multipole->CoM[2] = c->loc[2] + c->width[2] * 0.5;
-      c->multipole->r_max = 0.;
+      gravity_multipole_init(&c->grav.multipole->m_pole);
+      c->grav.multipole->CoM[0] = c->loc[0] + c->width[0] * 0.5;
+      c->grav.multipole->CoM[1] = c->loc[1] + c->width[1] * 0.5;
+      c->grav.multipole->CoM[2] = c->loc[2] + c->width[2] * 0.5;
+      c->grav.multipole->r_max = 0.;
     }
   }
 
   /* Also update the values at rebuild time */
-  c->multipole->r_max_rebuild = c->multipole->r_max;
-  c->multipole->CoM_rebuild[0] = c->multipole->CoM[0];
-  c->multipole->CoM_rebuild[1] = c->multipole->CoM[1];
-  c->multipole->CoM_rebuild[2] = c->multipole->CoM[2];
+  c->grav.multipole->r_max_rebuild = c->grav.multipole->r_max;
+  c->grav.multipole->CoM_rebuild[0] = c->grav.multipole->CoM[0];
+  c->grav.multipole->CoM_rebuild[1] = c->grav.multipole->CoM[1];
+  c->grav.multipole->CoM_rebuild[2] = c->grav.multipole->CoM[2];
+
+  c->grav.ti_old_multipole = ti_current;
+}
+
+/**
+ * @brief Recursively verify that the multipoles are the sum of their progenies.
+ *
+ * This function does not check whether the multipoles match the particle
+ * content as we may not have received the particles.
+ *
+ * @param c The #cell to recursively search and verify.
+ */
+void cell_check_foreign_multipole(const struct cell *c) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  if (c->split) {
+
+    double M_000 = 0.;
+    long long num_gpart = 0;
+
+    for (int k = 0; k < 8; k++) {
+      const struct cell *cp = c->progeny[k];
+
+      if (cp != NULL) {
+
+        /* Check the mass */
+        M_000 += cp->grav.multipole->m_pole.M_000;
+
+        /* Check the number of particles */
+        num_gpart += cp->grav.multipole->m_pole.num_gpart;
+
+        /* Now recurse */
+        cell_check_foreign_multipole(cp);
+      }
+    }
+
+    if (num_gpart != c->grav.multipole->m_pole.num_gpart)
+      error("Sum of particles in progenies does not match");
+  }
 
-  c->ti_old_multipole = ti_current;
+#else
+  error("Calling debugging code without debugging flag activated.");
+#endif
 }
 
 /**
@@ -1387,44 +1762,41 @@ void cell_make_multipoles(struct cell *c, integertime_t ti_current) {
  * recursively computed one.
  *
  * @param c Cell to act upon
- * @param data Unused parameter
  */
-void cell_check_multipole(struct cell *c, void *data) {
+void cell_check_multipole(struct cell *c) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   struct gravity_tensors ma;
   const double tolerance = 1e-3; /* Relative */
 
-  return;
-
   /* First recurse */
   if (c->split)
     for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL) cell_check_multipole(c->progeny[k], NULL);
+      if (c->progeny[k] != NULL) cell_check_multipole(c->progeny[k]);
 
-  if (c->gcount > 0) {
+  if (c->grav.count > 0) {
 
     /* Brute-force calculation */
-    gravity_P2M(&ma, c->gparts, c->gcount);
+    gravity_P2M(&ma, c->grav.parts, c->grav.count);
 
     /* Now  compare the multipole expansion */
-    if (!gravity_multipole_equal(&ma, c->multipole, tolerance)) {
+    if (!gravity_multipole_equal(&ma, c->grav.multipole, tolerance)) {
       message("Multipoles are not equal at depth=%d! tol=%f", c->depth,
               tolerance);
       message("Correct answer:");
       gravity_multipole_print(&ma.m_pole);
       message("Recursive multipole:");
-      gravity_multipole_print(&c->multipole->m_pole);
+      gravity_multipole_print(&c->grav.multipole->m_pole);
       error("Aborting");
     }
 
     /* Check that the upper limit of r_max is good enough */
-    if (!(c->multipole->r_max >= ma.r_max)) {
+    if (!(1.1 * c->grav.multipole->r_max >= ma.r_max)) {
       error("Upper-limit r_max=%e too small. Should be >=%e.",
-            c->multipole->r_max, ma.r_max);
-    } else if (c->multipole->r_max * c->multipole->r_max >
+            c->grav.multipole->r_max, ma.r_max);
+    } else if (c->grav.multipole->r_max * c->grav.multipole->r_max >
                3. * c->width[0] * c->width[0]) {
-      error("r_max=%e larger than cell diagonal %e.", c->multipole->r_max,
+      error("r_max=%e larger than cell diagonal %e.", c->grav.multipole->r_max,
             sqrt(3. * c->width[0] * c->width[0]));
     }
   }
@@ -1440,10 +1812,18 @@ void cell_check_multipole(struct cell *c, void *data) {
  */
 void cell_clean(struct cell *c) {
 
+  /* Hydro */
+  for (int i = 0; i < 13; i++)
+    if (c->hydro.sort[i] != NULL) {
+      free(c->hydro.sort[i]);
+      c->hydro.sort[i] = NULL;
+    }
+
+  /* Stars */
   for (int i = 0; i < 13; i++)
-    if (c->sort[i] != NULL) {
-      free(c->sort[i]);
-      c->sort[i] = NULL;
+    if (c->stars.sort[i] != NULL) {
+      free(c->stars.sort[i]);
+      c->stars.sort[i] = NULL;
     }
 
   /* Recurse */
@@ -1455,10 +1835,20 @@ void cell_clean(struct cell *c) {
  * @brief Clear the drift flags on the given cell.
  */
 void cell_clear_drift_flags(struct cell *c, void *data) {
-  c->do_drift = 0;
-  c->do_sub_drift = 0;
-  c->do_grav_drift = 0;
-  c->do_grav_sub_drift = 0;
+  c->hydro.do_drift = 0;
+  c->hydro.do_sub_drift = 0;
+  c->grav.do_drift = 0;
+  c->grav.do_sub_drift = 0;
+  c->stars.do_drift = 0;
+  c->stars.do_sub_drift = 0;
+}
+
+/**
+ * @brief Clear the limiter flags on the given cell.
+ */
+void cell_clear_limiter_flags(struct cell *c, void *data) {
+  c->hydro.do_limiter = 0;
+  c->hydro.do_sub_limiter = 0;
 }
 
 /**
@@ -1467,29 +1857,33 @@ void cell_clear_drift_flags(struct cell *c, void *data) {
 void cell_activate_drift_part(struct cell *c, struct scheduler *s) {
 
   /* If this cell is already marked for drift, quit early. */
-  if (c->do_drift) return;
+  if (c->hydro.do_drift) return;
 
   /* Mark this cell for drifting. */
-  c->do_drift = 1;
+  c->hydro.do_drift = 1;
 
   /* Set the do_sub_drifts all the way up and activate the super drift
      if this has not yet been done. */
-  if (c == c->super_hydro) {
+  if (c == c->hydro.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->drift_part == NULL)
-      error("Trying to activate un-existing c->drift_part");
+    if (c->hydro.drift == NULL)
+      error("Trying to activate un-existing c->hydro.drift");
 #endif
-    scheduler_activate(s, c->drift_part);
+    scheduler_activate(s, c->hydro.drift);
   } else {
     for (struct cell *parent = c->parent;
-         parent != NULL && !parent->do_sub_drift; parent = parent->parent) {
-      parent->do_sub_drift = 1;
-      if (parent == c->super_hydro) {
+         parent != NULL && !parent->hydro.do_sub_drift;
+         parent = parent->parent) {
+
+      /* Mark this cell for drifting */
+      parent->hydro.do_sub_drift = 1;
+
+      if (parent == c->hydro.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->drift_part == NULL)
-          error("Trying to activate un-existing parent->drift_part");
+        if (parent->hydro.drift == NULL)
+          error("Trying to activate un-existing parent->hydro.drift");
 #endif
-        scheduler_activate(s, parent->drift_part);
+        scheduler_activate(s, parent->hydro.drift);
         break;
       }
     }
@@ -1502,30 +1896,37 @@ void cell_activate_drift_part(struct cell *c, struct scheduler *s) {
 void cell_activate_drift_gpart(struct cell *c, struct scheduler *s) {
 
   /* If this cell is already marked for drift, quit early. */
-  if (c->do_grav_drift) return;
+  if (c->grav.do_drift) return;
 
   /* Mark this cell for drifting. */
-  c->do_grav_drift = 1;
+  c->grav.do_drift = 1;
+
+  if (c->grav.drift_out != NULL) scheduler_activate(s, c->grav.drift_out);
 
   /* Set the do_grav_sub_drifts all the way up and activate the super drift
      if this has not yet been done. */
-  if (c == c->super_gravity) {
+  if (c == c->grav.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->drift_gpart == NULL)
-      error("Trying to activate un-existing c->drift_gpart");
+    if (c->grav.drift == NULL)
+      error("Trying to activate un-existing c->grav.drift");
 #endif
-    scheduler_activate(s, c->drift_gpart);
+    scheduler_activate(s, c->grav.drift);
   } else {
     for (struct cell *parent = c->parent;
-         parent != NULL && !parent->do_grav_sub_drift;
+         parent != NULL && !parent->grav.do_sub_drift;
          parent = parent->parent) {
-      parent->do_grav_sub_drift = 1;
-      if (parent == c->super_gravity) {
+      parent->grav.do_sub_drift = 1;
+
+      if (parent->grav.drift_out) {
+        scheduler_activate(s, parent->grav.drift_out);
+      }
+
+      if (parent == c->grav.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->drift_gpart == NULL)
-          error("Trying to activate un-existing parent->drift_gpart");
+        if (parent->grav.drift == NULL)
+          error("Trying to activate un-existing parent->grav.drift");
 #endif
-        scheduler_activate(s, parent->drift_gpart);
+        scheduler_activate(s, parent->grav.drift);
         break;
       }
     }
@@ -1533,28 +1934,38 @@ void cell_activate_drift_gpart(struct cell *c, struct scheduler *s) {
 }
 
 /**
- * @brief Activate the sorts up a cell hierarchy.
+ * @brief Activate the #spart drifts on the given cell.
  */
-void cell_activate_sorts_up(struct cell *c, struct scheduler *s) {
+void cell_activate_drift_spart(struct cell *c, struct scheduler *s) {
 
-  if (c == c->super_hydro) {
+  /* If this cell is already marked for drift, quit early. */
+  if (c->stars.do_drift) return;
+
+  /* Mark this cell for drifting. */
+  c->stars.do_drift = 1;
+
+  /* Set the stars' do_sub_drift flags all the way up and activate the super drift
+     if this has not yet been done. */
+  if (c == c->hydro.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->sorts == NULL) error("Trying to activate un-existing c->sorts");
+    if (c->stars.drift == NULL)
+      error("Trying to activate un-existing c->stars.drift");
 #endif
-    scheduler_activate(s, c->sorts);
-    if (c->nodeID == engine_rank) cell_activate_drift_part(c, s);
+    scheduler_activate(s, c->stars.drift);
   } else {
-
     for (struct cell *parent = c->parent;
-         parent != NULL && !parent->do_sub_sort; parent = parent->parent) {
-      parent->do_sub_sort = 1;
-      if (parent == c->super_hydro) {
+         parent != NULL && !parent->stars.do_sub_drift;
+         parent = parent->parent) {
+
+      /* Mark this cell for drifting */
+      parent->stars.do_sub_drift = 1;
+
+      if (parent == c->hydro.super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->sorts == NULL)
-          error("Trying to activate un-existing parents->sorts");
+        if (parent->stars.drift == NULL)
+          error("Trying to activate un-existing parent->stars.drift");
 #endif
-        scheduler_activate(s, parent->sorts);
-        if (parent->nodeID == engine_rank) cell_activate_drift_part(parent, s);
+        scheduler_activate(s, parent->stars.drift);
         break;
       }
     }
@@ -1562,56 +1973,188 @@ void cell_activate_sorts_up(struct cell *c, struct scheduler *s) {
 }
 
 /**
- * @brief Activate the sorts on a given cell, if needed.
+ * @brief Activate the drifts on the given cell.
  */
-void cell_activate_sorts(struct cell *c, int sid, struct scheduler *s) {
+void cell_activate_limiter(struct cell *c, struct scheduler *s) {
 
-  /* Do we need to re-sort? */
-  if (c->dx_max_sort > space_maxreldx * c->dmin) {
+  /* If this cell is already marked for limiting, quit early. */
+  if (c->hydro.do_limiter) return;
 
-    /* Climb up the tree to active the sorts in that direction */
-    for (struct cell *finger = c; finger != NULL; finger = finger->parent) {
-      if (finger->requires_sorts) {
-        atomic_or(&finger->do_sort, finger->requires_sorts);
-        cell_activate_sorts_up(finger, s);
+  /* Mark this cell for limiting. */
+  c->hydro.do_limiter = 1;
+
+  /* Set the do_sub_limiter all the way up and activate the super limiter
+     if this has not yet been done. */
+  if (c == c->super) {
+#ifdef SWIFT_DEBUG_CHECKS
+    if (c->timestep_limiter == NULL)
+      error("Trying to activate un-existing c->timestep_limiter");
+#endif
+    scheduler_activate(s, c->timestep_limiter);
+  } else {
+    for (struct cell *parent = c->parent;
+         parent != NULL && !parent->hydro.do_sub_limiter;
+         parent = parent->parent) {
+
+      /* Mark this cell for limiting */
+      parent->hydro.do_sub_limiter = 1;
+
+      if (parent == c->super) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (parent->timestep_limiter == NULL)
+          error("Trying to activate un-existing parent->timestep_limiter");
+#endif
+        scheduler_activate(s, parent->timestep_limiter);
+        break;
       }
-      finger->sorted = 0;
     }
   }
-
-  /* Has this cell been sorted at all for the given sid? */
-  if (!(c->sorted & (1 << sid)) || c->nodeID != engine_rank) {
-    atomic_or(&c->do_sort, (1 << sid));
-    cell_activate_sorts_up(c, s);
-  }
 }
 
 /**
- * @brief Traverse a sub-cell task and activate the hydro drift tasks that are
- * required
- * by a hydro task
- *
- * @param ci The first #cell we recurse in.
- * @param cj The second #cell we recurse in.
- * @param s The task #scheduler.
+ * @brief Activate the sorts up a cell hierarchy.
  */
-void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
-                                       struct scheduler *s) {
-  const struct engine *e = s->space->e;
+void cell_activate_hydro_sorts_up(struct cell *c, struct scheduler *s) {
+
+  if (c == c->hydro.super) {
+#ifdef SWIFT_DEBUG_CHECKS
+    if (c->hydro.sorts == NULL)
+      error("Trying to activate un-existing c->hydro.sorts");
+#endif
+    scheduler_activate(s, c->hydro.sorts);
+    if (c->nodeID == engine_rank) cell_activate_drift_part(c, s);
+  } else {
+
+    for (struct cell *parent = c->parent;
+         parent != NULL && !parent->hydro.do_sub_sort;
+         parent = parent->parent) {
+      parent->hydro.do_sub_sort = 1;
+      if (parent == c->hydro.super) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (parent->hydro.sorts == NULL)
+          error("Trying to activate un-existing parents->hydro.sorts");
+#endif
+        scheduler_activate(s, parent->hydro.sorts);
+        if (parent->nodeID == engine_rank) cell_activate_drift_part(parent, s);
+        break;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Activate the sorts on a given cell, if needed.
+ */
+void cell_activate_hydro_sorts(struct cell *c, int sid, struct scheduler *s) {
+
+  /* Do we need to re-sort? */
+  if (c->hydro.dx_max_sort > space_maxreldx * c->dmin) {
+
+    /* Climb up the tree to activate the sorts in that direction */
+    for (struct cell *finger = c; finger != NULL; finger = finger->parent) {
+      if (finger->hydro.requires_sorts) {
+        atomic_or(&finger->hydro.do_sort, finger->hydro.requires_sorts);
+        cell_activate_hydro_sorts_up(finger, s);
+      }
+      finger->hydro.sorted = 0;
+    }
+  }
+
+  /* Has this cell been sorted at all for the given sid? */
+  if (!(c->hydro.sorted & (1 << sid)) || c->nodeID != engine_rank) {
+
+    atomic_or(&c->hydro.do_sort, (1 << sid));
+    cell_activate_hydro_sorts_up(c, s);
+  }
+}
+
+/**
+ * @brief Activate the sorts up a cell hierarchy.
+ */
+void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
+
+  if (c == c->hydro.super) {
+#ifdef SWIFT_DEBUG_CHECKS
+    if (c->stars.sorts == NULL)
+      error("Trying to activate un-existing c->stars.sorts");
+#endif
+    scheduler_activate(s, c->stars.sorts);
+    if (c->nodeID == engine_rank) {
+      // MATTHIEU: to do: do we actually need both drifts here?
+      cell_activate_drift_spart(c, s);
+    }
+  } else {
+
+    for (struct cell *parent = c->parent;
+         parent != NULL && !parent->stars.do_sub_sort;
+         parent = parent->parent) {
+      parent->stars.do_sub_sort = 1;
+      if (parent == c->hydro.super) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (parent->stars.sorts == NULL)
+          error("Trying to activate un-existing parents->stars.sorts");
+#endif
+        scheduler_activate(s, parent->stars.sorts);
+        if (parent->nodeID == engine_rank) cell_activate_drift_spart(parent, s);
+        break;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Activate the sorts on a given cell, if needed.
+ */
+void cell_activate_stars_sorts(struct cell *c, int sid, struct scheduler *s) {
+
+  /* Do we need to re-sort? */
+  if (c->stars.dx_max_sort > space_maxreldx * c->dmin) {
+
+    /* Climb up the tree to activate the sorts in that direction */
+    for (struct cell *finger = c; finger != NULL; finger = finger->parent) {
+      if (finger->stars.requires_sorts) {
+        atomic_or(&finger->stars.do_sort, finger->stars.requires_sorts);
+        cell_activate_stars_sorts_up(finger, s);
+      }
+      finger->stars.sorted = 0;
+    }
+  }
+
+  /* Has this cell been sorted at all for the given sid? */
+  if (!(c->stars.sorted & (1 << sid)) || c->nodeID != engine_rank) {
+
+    atomic_or(&c->stars.do_sort, (1 << sid));
+    cell_activate_stars_sorts_up(c, s);
+  }
+}
+
+/**
+ * @brief Traverse a sub-cell task and activate the hydro drift tasks that are
+ * required by a hydro task
+ *
+ * @param ci The first #cell we recurse in.
+ * @param cj The second #cell we recurse in.
+ * @param s The task #scheduler.
+ */
+void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
+                                       struct scheduler *s) {
+  const struct engine *e = s->space->e;
+  const int with_limiter = (e->policy & engine_policy_limiter);
+
+  /* Store the current dx_max and h_max values. */
+  ci->hydro.dx_max_part_old = ci->hydro.dx_max_part;
+  ci->hydro.h_max_old = ci->hydro.h_max;
 
-  /* Store the current dx_max and h_max values. */
-  ci->dx_max_old = ci->dx_max_part;
-  ci->h_max_old = ci->h_max;
   if (cj != NULL) {
-    cj->dx_max_old = cj->dx_max_part;
-    cj->h_max_old = cj->h_max;
+    cj->hydro.dx_max_part_old = cj->hydro.dx_max_part;
+    cj->hydro.h_max_old = cj->hydro.h_max;
   }
 
   /* Self interaction? */
   if (cj == NULL) {
 
     /* Do anything? */
-    if (ci->count == 0 || !cell_is_active_hydro(ci, e)) return;
+    if (ci->hydro.count == 0 || !cell_is_active_hydro(ci, e)) return;
 
     /* Recurse? */
     if (cell_can_recurse_in_self_hydro_task(ci)) {
@@ -1630,6 +2173,7 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
 
       /* We have reached the bottom of the tree: activate drift */
       cell_activate_drift_part(ci, s);
+      if (with_limiter) cell_activate_limiter(ci, s);
     }
   }
 
@@ -1638,7 +2182,7 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
 
     /* Should we even bother? */
     if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
-    if (ci->count == 0 || cj->count == 0) return;
+    if (ci->hydro.count == 0 || cj->hydro.count == 0) return;
 
     /* Get the orientation of the pair. */
     double shift[3];
@@ -1926,152 +2470,520 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
     else if (cell_is_active_hydro(ci, e) || cell_is_active_hydro(cj, e)) {
 
       /* We are going to interact this pair, so store some values. */
-      atomic_or(&ci->requires_sorts, 1 << sid);
-      atomic_or(&cj->requires_sorts, 1 << sid);
-      ci->dx_max_sort_old = ci->dx_max_sort;
-      cj->dx_max_sort_old = cj->dx_max_sort;
+      atomic_or(&ci->hydro.requires_sorts, 1 << sid);
+      atomic_or(&cj->hydro.requires_sorts, 1 << sid);
+      ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+      cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
 
       /* Activate the drifts if the cells are local. */
       if (ci->nodeID == engine_rank) cell_activate_drift_part(ci, s);
       if (cj->nodeID == engine_rank) cell_activate_drift_part(cj, s);
 
+      /* Also activate the time-step limiter */
+      if (ci->nodeID == engine_rank && with_limiter)
+        cell_activate_limiter(ci, s);
+      if (cj->nodeID == engine_rank && with_limiter)
+        cell_activate_limiter(cj, s);
+
       /* Do we need to sort the cells? */
-      cell_activate_sorts(ci, sid, s);
-      cell_activate_sorts(cj, sid, s);
+      cell_activate_hydro_sorts(ci, sid, s);
+      cell_activate_hydro_sorts(cj, sid, s);
     }
   } /* Otherwise, pair interation */
 }
 
-void cell_activate_grav_mm_task(struct cell *ci, struct cell *cj,
-                                struct scheduler *s) {
-  /* Some constants */
-  const struct engine *e = s->space->e;
-
-  /* Anything to do here? */
-  if (!cell_is_active_gravity(ci, e) && !cell_is_active_gravity(cj, e))
-    error("Inactive MM task being activated");
-
-  /* Atomically drift the multipole in ci */
-  lock_lock(&ci->mlock);
-  if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
-  if (lock_unlock(&ci->mlock) != 0) error("Impossible to unlock m-pole");
-
-  /* Atomically drift the multipole in cj */
-  lock_lock(&cj->mlock);
-  if (cj->ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
-  if (lock_unlock(&cj->mlock) != 0) error("Impossible to unlock m-pole");
-}
-
 /**
- * @brief Traverse a sub-cell task and activate the gravity drift tasks that
- * are required by a self gravity task.
+ * @brief Traverse a sub-cell task and activate the stars drift tasks that are
+ * required by a stars task
  *
  * @param ci The first #cell we recurse in.
  * @param cj The second #cell we recurse in.
  * @param s The task #scheduler.
  */
-void cell_activate_subcell_grav_tasks(struct cell *ci, struct cell *cj,
-                                      struct scheduler *s) {
-  /* Some constants */
-  const struct space *sp = s->space;
-  const struct engine *e = sp->e;
+void cell_activate_subcell_stars_tasks(struct cell *ci, struct cell *cj,
+                                       struct scheduler *s) {
+  const struct engine *e = s->space->e;
+
+  /* Store the current dx_max and h_max values. */
+  ci->stars.dx_max_part_old = ci->stars.dx_max_part;
+  ci->stars.h_max_old = ci->stars.h_max;
+  ci->hydro.dx_max_part_old = ci->hydro.dx_max_part;
+  ci->hydro.h_max_old = ci->hydro.h_max;
+
+  if (cj != NULL) {
+    cj->stars.dx_max_part_old = cj->stars.dx_max_part;
+    cj->stars.h_max_old = cj->stars.h_max;
+    cj->hydro.dx_max_part_old = cj->hydro.dx_max_part;
+    cj->hydro.h_max_old = cj->hydro.h_max;
+  }
 
   /* Self interaction? */
   if (cj == NULL) {
 
     /* Do anything? */
-    if (ci->gcount == 0 || !cell_is_active_gravity(ci, e)) return;
+    if (!cell_is_active_stars(ci, e) || ci->hydro.count == 0 ||
+        ci->stars.count == 0)
+      return;
 
     /* Recurse? */
-    if (ci->split) {
+    if (cell_can_recurse_in_self_stars_task(ci)) {
 
       /* Loop over all progenies and pairs of progenies */
       for (int j = 0; j < 8; j++) {
         if (ci->progeny[j] != NULL) {
-          cell_activate_subcell_grav_tasks(ci->progeny[j], NULL, s);
+          cell_activate_subcell_stars_tasks(ci->progeny[j], NULL, s);
           for (int k = j + 1; k < 8; k++)
             if (ci->progeny[k] != NULL)
-              cell_activate_subcell_grav_tasks(ci->progeny[j], ci->progeny[k],
-                                               s);
+              cell_activate_subcell_stars_tasks(ci->progeny[j], ci->progeny[k],
+                                                s);
         }
       }
     } else {
 
-      /* We have reached the bottom of the tree: activate gpart drift */
-      cell_activate_drift_gpart(ci, s);
+      /* We have reached the bottom of the tree: activate drift */
+      cell_activate_drift_spart(ci, s);
+      cell_activate_drift_part(ci, s);
     }
   }
 
-  /* Pair interaction */
+  /* Otherwise, pair interaction */
   else {
 
-    /* Anything to do here? */
-    if (!cell_is_active_gravity(ci, e) && !cell_is_active_gravity(cj, e))
-      return;
-    if (ci->gcount == 0 || cj->gcount == 0) return;
+    /* Should we even bother? */
+    if (!cell_is_active_stars(ci, e) && !cell_is_active_stars(cj, e)) return;
 
-    /* Atomically drift the multipole in ci */
-    lock_lock(&ci->mlock);
-    if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
-    if (lock_unlock(&ci->mlock) != 0) error("Impossible to unlock m-pole");
+    /* Get the orientation of the pair. */
+    double shift[3];
+    int sid = space_getsid(s->space, &ci, &cj, shift);
 
-    /* Atomically drift the multipole in cj */
-    lock_lock(&cj->mlock);
-    if (cj->ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
-    if (lock_unlock(&cj->mlock) != 0) error("Impossible to unlock m-pole");
+    /* recurse? */
+    if (cell_can_recurse_in_pair_stars_task(ci, cj) &&
+        cell_can_recurse_in_pair_stars_task(cj, ci)) {
 
-    /* Can we use multipoles ? */
-    if (cell_can_use_pair_mm(ci, cj, e, sp)) {
+      /* Different types of flags. */
+      switch (sid) {
 
-      /* Ok, no need to drift anything */
-      return;
-    }
-    /* Otherwise, activate the gpart drifts if we are at the bottom. */
-    else if (!ci->split && !cj->split) {
+        /* Regular sub-cell interactions of a single cell. */
+        case 0: /* (  1 ,  1 ,  1 ) */
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          break;
 
-      /* Activate the drifts if the cells are local. */
-      if (cell_is_active_gravity(ci, e) || cell_is_active_gravity(cj, e)) {
-        if (ci->nodeID == engine_rank) cell_activate_drift_gpart(ci, s);
-        if (cj->nodeID == engine_rank) cell_activate_drift_gpart(cj, s);
-      }
-    }
-    /* Ok, we can still recurse */
-    else {
+        case 1: /* (  1 ,  1 ,  0 ) */
+          if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[0],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[1],
+                                              s);
+          break;
 
-      /* Recover the multipole information */
-      struct gravity_tensors *const multi_i = ci->multipole;
-      struct gravity_tensors *const multi_j = cj->multipole;
-      const double ri_max = multi_i->r_max;
-      const double rj_max = multi_j->r_max;
+        case 2: /* (  1 ,  1 , -1 ) */
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          break;
 
-      if (ri_max > rj_max) {
-        if (ci->split) {
+        case 3: /* (  1 ,  0 ,  1 ) */
+          if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[0],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[2],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[2],
+                                              s);
+          break;
 
-          /* Loop over ci's children */
-          for (int k = 0; k < 8; k++) {
-            if (ci->progeny[k] != NULL)
-              cell_activate_subcell_grav_tasks(ci->progeny[k], cj, s);
-          }
+        case 4: /* (  1 ,  0 ,  0 ) */
+          if (ci->progeny[4] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[0],
+                                              s);
+          if (ci->progeny[4] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[1],
+                                              s);
+          if (ci->progeny[4] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[2],
+                                              s);
+          if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[3],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[0],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[1],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[2],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[3],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[0],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[2],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[3],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[1],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[2],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[3],
+                                              s);
+          break;
 
-        } else if (cj->split) {
+        case 5: /* (  1 ,  0 , -1 ) */
+          if (ci->progeny[4] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[1],
+                                              s);
+          if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[3],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[3],
+                                              s);
+          break;
 
-          /* Loop over cj's children */
-          for (int k = 0; k < 8; k++) {
-            if (cj->progeny[k] != NULL)
-              cell_activate_subcell_grav_tasks(ci, cj->progeny[k], s);
-          }
+        case 6: /* (  1 , -1 ,  1 ) */
+          if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[2],
+                                              s);
+          break;
 
-        } else {
-          error("Fundamental error in the logic");
-        }
-      } else if (rj_max >= ri_max) {
-        if (cj->split) {
+        case 7: /* (  1 , -1 ,  0 ) */
+          if (ci->progeny[4] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[2],
+                                              s);
+          if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[3],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[2],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[3],
+                                              s);
+          break;
 
-          /* Loop over cj's children */
-          for (int k = 0; k < 8; k++) {
-            if (cj->progeny[k] != NULL)
-              cell_activate_subcell_grav_tasks(ci, cj->progeny[k], s);
-          }
+        case 8: /* (  1 , -1 , -1 ) */
+          if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[4], cj->progeny[3],
+                                              s);
+          break;
+
+        case 9: /* (  0 ,  1 ,  1 ) */
+          if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[0],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[4],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[4],
+                                              s);
+          break;
+
+        case 10: /* (  0 ,  1 ,  0 ) */
+          if (ci->progeny[2] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[0],
+                                              s);
+          if (ci->progeny[2] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[1],
+                                              s);
+          if (ci->progeny[2] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[4],
+                                              s);
+          if (ci->progeny[2] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[5],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[0],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[1],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[4],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[5],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[0],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[4],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[5],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[1],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[4],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[5],
+                                              s);
+          break;
+
+        case 11: /* (  0 ,  1 , -1 ) */
+          if (ci->progeny[2] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[1],
+                                              s);
+          if (ci->progeny[2] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[2], cj->progeny[5],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[1],
+                                              s);
+          if (ci->progeny[6] != NULL && cj->progeny[5] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[6], cj->progeny[5],
+                                              s);
+          break;
+
+        case 12: /* (  0 ,  0 ,  1 ) */
+          if (ci->progeny[1] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[1], cj->progeny[0],
+                                              s);
+          if (ci->progeny[1] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[1], cj->progeny[2],
+                                              s);
+          if (ci->progeny[1] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[1], cj->progeny[4],
+                                              s);
+          if (ci->progeny[1] != NULL && cj->progeny[6] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[1], cj->progeny[6],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[0],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[2],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[4],
+                                              s);
+          if (ci->progeny[3] != NULL && cj->progeny[6] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[3], cj->progeny[6],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[0],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[2],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[4],
+                                              s);
+          if (ci->progeny[5] != NULL && cj->progeny[6] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[5], cj->progeny[6],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[0],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[2],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[4],
+                                              s);
+          if (ci->progeny[7] != NULL && cj->progeny[6] != NULL)
+            cell_activate_subcell_stars_tasks(ci->progeny[7], cj->progeny[6],
+                                              s);
+          break;
+      }
+
+    }
+
+    /* Otherwise, activate the sorts and drifts. */
+    else {
+
+      if (cell_is_active_stars(ci, e)) {
+        /* We are going to interact this pair, so store some values. */
+        atomic_or(&cj->hydro.requires_sorts, 1 << sid);
+        atomic_or(&ci->stars.requires_sorts, 1 << sid);
+
+        cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
+        ci->stars.dx_max_sort_old = ci->stars.dx_max_sort;
+
+        /* Activate the drifts if the cells are local. */
+        if (ci->nodeID == engine_rank) cell_activate_drift_spart(ci, s);
+        if (cj->nodeID == engine_rank) cell_activate_drift_part(cj, s);
+
+        /* Do we need to sort the cells? */
+        cell_activate_hydro_sorts(cj, sid, s);
+        cell_activate_stars_sorts(ci, sid, s);
+      }
+
+      if (cell_is_active_stars(cj, e)) {
+        /* We are going to interact this pair, so store some values. */
+        atomic_or(&cj->stars.requires_sorts, 1 << sid);
+        atomic_or(&ci->hydro.requires_sorts, 1 << sid);
+
+        ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+        cj->stars.dx_max_sort_old = cj->stars.dx_max_sort;
+
+        /* Activate the drifts if the cells are local. */
+        if (ci->nodeID == engine_rank) cell_activate_drift_part(ci, s);
+        if (cj->nodeID == engine_rank) cell_activate_drift_spart(cj, s);
+
+        /* Do we need to sort the cells? */
+        cell_activate_hydro_sorts(ci, sid, s);
+        cell_activate_stars_sorts(cj, sid, s);
+      }
+    }
+  } /* Otherwise, pair interaction */
+}
+
+/**
+ * @brief Traverse a sub-cell task and activate the gravity drift tasks that
+ * are required by a self or pair gravity task.
+ *
+ * @param ci The first #cell we recurse in.
+ * @param cj The second #cell we recurse in.
+ * @param s The task #scheduler.
+ */
+void cell_activate_subcell_grav_tasks(struct cell *ci, struct cell *cj,
+                                      struct scheduler *s) {
+  /* Some constants */
+  const struct space *sp = s->space;
+  const struct engine *e = sp->e;
+
+  /* Self interaction? */
+  if (cj == NULL) {
+
+    /* Do anything? */
+    if (ci->grav.count == 0 || !cell_is_active_gravity(ci, e)) return;
+
+    /* Recurse? */
+    if (ci->split) {
+
+      /* Loop over all progenies and pairs of progenies */
+      for (int j = 0; j < 8; j++) {
+        if (ci->progeny[j] != NULL) {
+          cell_activate_subcell_grav_tasks(ci->progeny[j], NULL, s);
+          for (int k = j + 1; k < 8; k++)
+            if (ci->progeny[k] != NULL)
+              cell_activate_subcell_grav_tasks(ci->progeny[j], ci->progeny[k],
+                                               s);
+        }
+      }
+    } else {
+
+      /* We have reached the bottom of the tree: activate gpart drift */
+      cell_activate_drift_gpart(ci, s);
+    }
+  }
+
+  /* Pair interaction */
+  else {
+
+    /* Anything to do here? */
+    if (!cell_is_active_gravity(ci, e) && !cell_is_active_gravity(cj, e))
+      return;
+    if (ci->grav.count == 0 || cj->grav.count == 0) return;
+
+    /* Atomically drift the multipole in ci */
+    lock_lock(&ci->grav.mlock);
+    if (ci->grav.ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
+    if (lock_unlock(&ci->grav.mlock) != 0) error("Impossible to unlock m-pole");
+
+    /* Atomically drift the multipole in cj */
+    lock_lock(&cj->grav.mlock);
+    if (cj->grav.ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
+    if (lock_unlock(&cj->grav.mlock) != 0) error("Impossible to unlock m-pole");
+
+    /* Can we use multipoles ? */
+    if (cell_can_use_pair_mm(ci, cj, e, sp)) {
+
+      /* Ok, no need to drift anything */
+      return;
+    }
+    /* Otherwise, activate the gpart drifts if we are at the bottom. */
+    else if (!ci->split && !cj->split) {
+
+      /* Activate the drifts if the cells are local. */
+      if (cell_is_active_gravity(ci, e) || cell_is_active_gravity(cj, e)) {
+        if (ci->nodeID == engine_rank) cell_activate_drift_gpart(ci, s);
+        if (cj->nodeID == engine_rank) cell_activate_drift_gpart(cj, s);
+      }
+    }
+    /* Ok, we can still recurse */
+    else {
+
+      /* Recover the multipole information */
+      const struct gravity_tensors *const multi_i = ci->grav.multipole;
+      const struct gravity_tensors *const multi_j = cj->grav.multipole;
+      const double ri_max = multi_i->r_max;
+      const double rj_max = multi_j->r_max;
+
+      if (ri_max > rj_max) {
+        if (ci->split) {
+
+          /* Loop over ci's children */
+          for (int k = 0; k < 8; k++) {
+            if (ci->progeny[k] != NULL)
+              cell_activate_subcell_grav_tasks(ci->progeny[k], cj, s);
+          }
+
+        } else if (cj->split) {
+
+          /* Loop over cj's children */
+          for (int k = 0; k < 8; k++) {
+            if (cj->progeny[k] != NULL)
+              cell_activate_subcell_grav_tasks(ci, cj->progeny[k], s);
+          }
+
+        } else {
+          error("Fundamental error in the logic");
+        }
+      } else if (rj_max >= ri_max) {
+        if (cj->split) {
+
+          /* Loop over cj's children */
+          for (int k = 0; k < 8; k++) {
+            if (cj->progeny[k] != NULL)
+              cell_activate_subcell_grav_tasks(ci, cj->progeny[k], s);
+          }
 
         } else if (ci->split) {
 
@@ -2135,45 +3047,58 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
 
   struct engine *e = s->space->e;
   const int nodeID = e->nodeID;
+  const int with_limiter = (e->policy & engine_policy_limiter);
   int rebuild = 0;
 
   /* Un-skip the density tasks involved with this cell. */
-  for (struct link *l = c->density; l != NULL; l = l->next) {
+  for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
     struct task *t = l->t;
     struct cell *ci = t->ci;
     struct cell *cj = t->cj;
     const int ci_active = cell_is_active_hydro(ci, e);
     const int cj_active = (cj != NULL) ? cell_is_active_hydro(cj, e) : 0;
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
 
     /* Only activate tasks that involve a local active cell. */
-    if ((ci_active && ci->nodeID == nodeID) ||
-        (cj_active && cj->nodeID == nodeID)) {
+    if ((ci_active && ci_nodeID == nodeID) ||
+        (cj_active && cj_nodeID == nodeID)) {
       scheduler_activate(s, t);
 
       /* Activate hydro drift */
       if (t->type == task_type_self) {
-        if (ci->nodeID == nodeID) cell_activate_drift_part(ci, s);
+        if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+        if (ci_nodeID == nodeID && with_limiter) cell_activate_limiter(ci, s);
       }
 
       /* Set the correct sorting flags and activate hydro drifts */
       else if (t->type == task_type_pair) {
         /* Store some values. */
-        atomic_or(&ci->requires_sorts, 1 << t->flags);
-        atomic_or(&cj->requires_sorts, 1 << t->flags);
-        ci->dx_max_sort_old = ci->dx_max_sort;
-        cj->dx_max_sort_old = cj->dx_max_sort;
+        atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
+        atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
+        ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+        cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
 
         /* Activate the drift tasks. */
-        if (ci->nodeID == nodeID) cell_activate_drift_part(ci, s);
-        if (cj->nodeID == nodeID) cell_activate_drift_part(cj, s);
+        if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+        if (cj_nodeID == nodeID) cell_activate_drift_part(cj, s);
+
+        /* Activate the limiter tasks. */
+        if (ci_nodeID == nodeID && with_limiter) cell_activate_limiter(ci, s);
+        if (cj_nodeID == nodeID && with_limiter) cell_activate_limiter(cj, s);
 
         /* Check the sorts and activate them if needed. */
-        cell_activate_sorts(ci, t->flags, s);
-        cell_activate_sorts(cj, t->flags, s);
+        cell_activate_hydro_sorts(ci, t->flags, s);
+        cell_activate_hydro_sorts(cj, t->flags, s);
       }
       /* Store current values of dx_max and h_max. */
       else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
-        cell_activate_subcell_hydro_tasks(t->ci, t->cj, s);
+        cell_activate_subcell_hydro_tasks(ci, cj, s);
       }
     }
 
@@ -2182,88 +3107,100 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
 
       /* Check whether there was too much particle motion, i.e. the
          cell neighbour conditions were violated. */
-      if (cell_need_rebuild_for_pair(ci, cj)) rebuild = 1;
+      if (cell_need_rebuild_for_hydro_pair(ci, cj)) rebuild = 1;
 
 #ifdef WITH_MPI
       /* Activate the send/recv tasks. */
-      if (ci->nodeID != nodeID) {
+      if (ci_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
         if (cj_active) {
-          scheduler_activate(s, ci->recv_xv);
+          scheduler_activate(s, ci->mpi.hydro.recv_xv);
           if (ci_active) {
-            scheduler_activate(s, ci->recv_rho);
+            scheduler_activate(s, ci->mpi.hydro.recv_rho);
 
 #ifdef EXTRA_HYDRO_LOOP
-            scheduler_activate(s, ci->recv_gradient);
+            scheduler_activate(s, ci->mpi.hydro.recv_gradient);
 #endif
           }
         }
 
         /* If the foreign cell is active, we want its ti_end values. */
-        if (ci_active) scheduler_activate(s, ci->recv_ti);
+        if (ci_active || with_limiter) scheduler_activate(s, ci->mpi.recv_ti);
+
+        if (with_limiter) scheduler_activate(s, ci->mpi.limiter.recv);
+        if (with_limiter)
+          scheduler_activate_send(s, cj->mpi.limiter.send, ci->nodeID);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (ci_active) {
 
-          scheduler_activate_send(s, cj->send_xv, ci->nodeID);
+          scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
 
           /* Drift the cell which will be sent; note that not all sent
              particles will be drifted, only those that are needed. */
           cell_activate_drift_part(cj, s);
+          if (with_limiter) cell_activate_limiter(cj, s);
 
           /* If the local cell is also active, more stuff will be needed. */
           if (cj_active) {
-            scheduler_activate_send(s, cj->send_rho, ci->nodeID);
+            scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
 
 #ifdef EXTRA_HYDRO_LOOP
-            scheduler_activate_send(s, cj->send_gradient, ci->nodeID);
+            scheduler_activate_send(s, cj->mpi.hydro.send_gradient, ci_nodeID);
 #endif
           }
         }
 
         /* If the local cell is active, send its ti_end values. */
-        if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
+        if (cj_active || with_limiter)
+          scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
 
-      } else if (cj->nodeID != nodeID) {
+      } else if (cj_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
         if (ci_active) {
-          scheduler_activate(s, cj->recv_xv);
+          scheduler_activate(s, cj->mpi.hydro.recv_xv);
           if (cj_active) {
-            scheduler_activate(s, cj->recv_rho);
+            scheduler_activate(s, cj->mpi.hydro.recv_rho);
 
 #ifdef EXTRA_HYDRO_LOOP
-            scheduler_activate(s, cj->recv_gradient);
+            scheduler_activate(s, cj->mpi.hydro.recv_gradient);
 #endif
           }
         }
 
         /* If the foreign cell is active, we want its ti_end values. */
-        if (cj_active) scheduler_activate(s, cj->recv_ti);
+        if (cj_active || with_limiter) scheduler_activate(s, cj->mpi.recv_ti);
+
+        if (with_limiter) scheduler_activate(s, cj->mpi.limiter.recv);
+        if (with_limiter)
+          scheduler_activate_send(s, ci->mpi.limiter.send, cj->nodeID);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (cj_active) {
 
-          scheduler_activate_send(s, ci->send_xv, cj->nodeID);
+          scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
 
           /* Drift the cell which will be sent; note that not all sent
              particles will be drifted, only those that are needed. */
           cell_activate_drift_part(ci, s);
+          if (with_limiter) cell_activate_limiter(ci, s);
 
           /* If the local cell is also active, more stuff will be needed. */
           if (ci_active) {
 
-            scheduler_activate_send(s, ci->send_rho, cj->nodeID);
+            scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
 
 #ifdef EXTRA_HYDRO_LOOP
-            scheduler_activate_send(s, ci->send_gradient, cj->nodeID);
+            scheduler_activate_send(s, ci->mpi.hydro.send_gradient, cj_nodeID);
 #endif
           }
         }
 
         /* If the local cell is active, send its ti_end values. */
-        if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
+        if (ci_active || with_limiter)
+          scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
       }
 #endif
     }
@@ -2272,21 +3209,26 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
   /* Unskip all the other task types. */
   if (c->nodeID == nodeID && cell_is_active_hydro(c, e)) {
 
-    for (struct link *l = c->gradient; l != NULL; l = l->next)
+    for (struct link *l = c->hydro.gradient; l != NULL; l = l->next)
       scheduler_activate(s, l->t);
-    for (struct link *l = c->force; l != NULL; l = l->next)
+    for (struct link *l = c->hydro.force; l != NULL; l = l->next)
+      scheduler_activate(s, l->t);
+    for (struct link *l = c->hydro.limiter; l != NULL; l = l->next)
       scheduler_activate(s, l->t);
 
-    if (c->extra_ghost != NULL) scheduler_activate(s, c->extra_ghost);
-    if (c->ghost_in != NULL) scheduler_activate(s, c->ghost_in);
-    if (c->ghost_out != NULL) scheduler_activate(s, c->ghost_out);
-    if (c->ghost != NULL) scheduler_activate(s, c->ghost);
+    if (c->hydro.extra_ghost != NULL)
+      scheduler_activate(s, c->hydro.extra_ghost);
+    if (c->hydro.ghost_in != NULL) scheduler_activate(s, c->hydro.ghost_in);
+    if (c->hydro.ghost_out != NULL) scheduler_activate(s, c->hydro.ghost_out);
+    if (c->hydro.ghost != NULL) scheduler_activate(s, c->hydro.ghost);
     if (c->kick1 != NULL) scheduler_activate(s, c->kick1);
     if (c->kick2 != NULL) scheduler_activate(s, c->kick2);
     if (c->timestep != NULL) scheduler_activate(s, c->timestep);
-    if (c->end_force != NULL) scheduler_activate(s, c->end_force);
-    if (c->cooling != NULL) scheduler_activate(s, c->cooling);
-    if (c->sourceterms != NULL) scheduler_activate(s, c->sourceterms);
+    if (c->hydro.end_force != NULL) scheduler_activate(s, c->hydro.end_force);
+    if (c->hydro.cooling != NULL) scheduler_activate(s, c->hydro.cooling);
+    if (c->hydro.star_formation != NULL)
+      scheduler_activate(s, c->hydro.star_formation);
+    if (c->logger != NULL) scheduler_activate(s, c->logger);
   }
 
   return rebuild;
@@ -2308,18 +3250,24 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
   int rebuild = 0;
 
   /* Un-skip the gravity tasks involved with this cell. */
-  for (struct link *l = c->grav; l != NULL; l = l->next) {
+  for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
     struct task *t = l->t;
     struct cell *ci = t->ci;
     struct cell *cj = t->cj;
-    const int ci_nodeID = ci->nodeID;
-    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
     const int ci_active = cell_is_active_gravity(ci, e);
     const int cj_active = (cj != NULL) ? cell_is_active_gravity(cj, e) : 0;
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
 
     /* Only activate tasks that involve a local active cell. */
     if ((ci_active && ci_nodeID == nodeID) ||
         (cj_active && cj_nodeID == nodeID)) {
+
       scheduler_activate(s, t);
 
       /* Set the drifting flags */
@@ -2331,7 +3279,9 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
       } else if (t->type == task_type_pair) {
         cell_activate_subcell_grav_tasks(ci, cj, s);
       } else if (t->type == task_type_grav_mm) {
-        cell_activate_grav_mm_task(ci, cj, s);
+#ifdef SWIFT_DEBUG_CHECKS
+        error("Incorrectly linked M-M task!");
+#endif
       }
     }
 
@@ -2342,17 +3292,15 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
       if (ci_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
-        if (cj_active) {
-          scheduler_activate(s, ci->recv_grav);
-        }
+        if (cj_active) scheduler_activate(s, ci->mpi.grav.recv);
 
         /* If the foreign cell is active, we want its ti_end values. */
-        if (ci_active) scheduler_activate(s, ci->recv_ti);
+        if (ci_active) scheduler_activate(s, ci->mpi.recv_ti);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (ci_active) {
 
-          scheduler_activate_send(s, cj->send_grav, ci_nodeID);
+          scheduler_activate_send(s, cj->mpi.grav.send, ci_nodeID);
 
           /* Drift the cell which will be sent at the level at which it is
              sent, i.e. drift the cell specified in the send task (l->t)
@@ -2361,22 +3309,20 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
         }
 
         /* If the local cell is active, send its ti_end values. */
-        if (cj_active) scheduler_activate_send(s, cj->send_ti, ci_nodeID);
+        if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
 
       } else if (cj_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
-        if (ci_active) {
-          scheduler_activate(s, cj->recv_grav);
-        }
+        if (ci_active) scheduler_activate(s, cj->mpi.grav.recv);
 
         /* If the foreign cell is active, we want its ti_end values. */
-        if (cj_active) scheduler_activate(s, cj->recv_ti);
+        if (cj_active) scheduler_activate(s, cj->mpi.recv_ti);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (cj_active) {
 
-          scheduler_activate_send(s, ci->send_grav, cj_nodeID);
+          scheduler_activate_send(s, ci->mpi.grav.send, cj_nodeID);
 
           /* Drift the cell which will be sent at the level at which it is
              sent, i.e. drift the cell specified in the send task (l->t)
@@ -2385,40 +3331,281 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
         }
 
         /* If the local cell is active, send its ti_end values. */
-        if (ci_active) scheduler_activate_send(s, ci->send_ti, cj_nodeID);
+        if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
       }
 #endif
     }
   }
 
-  /* Unskip all the other task types. */
-  if (c->nodeID == nodeID && cell_is_active_gravity(c, e)) {
-
-    if (c->init_grav != NULL) scheduler_activate(s, c->init_grav);
-    if (c->init_grav_out != NULL) scheduler_activate(s, c->init_grav_out);
-    if (c->kick1 != NULL) scheduler_activate(s, c->kick1);
-    if (c->kick2 != NULL) scheduler_activate(s, c->kick2);
-    if (c->timestep != NULL) scheduler_activate(s, c->timestep);
-    if (c->end_force != NULL) scheduler_activate(s, c->end_force);
-    if (c->grav_down != NULL) scheduler_activate(s, c->grav_down);
-    if (c->grav_down_in != NULL) scheduler_activate(s, c->grav_down_in);
-    if (c->grav_mesh != NULL) scheduler_activate(s, c->grav_mesh);
-    if (c->grav_long_range != NULL) scheduler_activate(s, c->grav_long_range);
-  }
-
-  return rebuild;
-}
+  for (struct link *l = c->grav.mm; l != NULL; l = l->next) {
 
-/**
- * @brief Set the super-cell pointers for all cells in a hierarchy.
- *
+    struct task *t = l->t;
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+    const int ci_active = cell_is_active_gravity_mm(ci, e);
+    const int cj_active = cell_is_active_gravity_mm(cj, e);
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (t->type != task_type_grav_mm) error("Incorrectly linked gravity task!");
+#endif
+
+    /* Only activate tasks that involve a local active cell. */
+    if ((ci_active && ci_nodeID == nodeID) ||
+        (cj_active && cj_nodeID == nodeID)) {
+
+      scheduler_activate(s, t);
+    }
+  }
+
+  /* Unskip all the other task types. */
+  if (c->nodeID == nodeID && cell_is_active_gravity(c, e)) {
+
+    if (c->grav.init != NULL) scheduler_activate(s, c->grav.init);
+    if (c->grav.init_out != NULL) scheduler_activate(s, c->grav.init_out);
+    if (c->kick1 != NULL) scheduler_activate(s, c->kick1);
+    if (c->kick2 != NULL) scheduler_activate(s, c->kick2);
+    if (c->timestep != NULL) scheduler_activate(s, c->timestep);
+    if (c->grav.down != NULL) scheduler_activate(s, c->grav.down);
+    if (c->grav.down_in != NULL) scheduler_activate(s, c->grav.down_in);
+    if (c->grav.mesh != NULL) scheduler_activate(s, c->grav.mesh);
+    if (c->grav.long_range != NULL) scheduler_activate(s, c->grav.long_range);
+    if (c->grav.end_force != NULL) scheduler_activate(s, c->grav.end_force);
+    if (c->logger != NULL) scheduler_activate(s, c->logger);
+
+    /* Subgrid tasks */
+    if ((e->policy & engine_policy_cooling) && c->hydro.cooling != NULL)
+      scheduler_activate(s, c->hydro.cooling);
+    if ((e->policy & engine_policy_star_formation) &&
+        c->hydro.star_formation != NULL)
+      scheduler_activate(s, c->hydro.star_formation);
+  }
+
+  return rebuild;
+}
+
+/**
+ * @brief Un-skips all the stars tasks associated with a given cell and checks
+ * if the space needs to be rebuilt.
+ *
+ * @param c the #cell.
+ * @param s the #scheduler.
+ *
+ * @return 1 If the space needs rebuilding. 0 otherwise.
+ */
+int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
+
+  struct engine *e = s->space->e;
+  const int with_feedback = (e->policy & engine_policy_feedback);
+  const int nodeID = e->nodeID;
+  int rebuild = 0;
+
+  if (!with_feedback && c->stars.drift != NULL && cell_is_active_stars(c, e)) {
+    cell_activate_drift_spart(c, s);
+  }
+
+  /* Un-skip the density tasks involved with this cell. */
+  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
+    struct task *t = l->t;
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+    const int ci_active = cell_is_active_stars(ci, e);
+    const int cj_active = (cj != NULL) ? cell_is_active_stars(cj, e) : 0;
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
+
+    /* Activate the drifts */
+    if (t->type == task_type_self && ci_active) {
+      cell_activate_drift_part(ci, s);
+      cell_activate_drift_spart(ci, s);
+    }
+
+    /* Only activate tasks that involve a local active cell. */
+    if ((ci_active || cj_active) &&
+        (ci_nodeID == nodeID || cj_nodeID == nodeID)) {
+
+      scheduler_activate(s, t);
+
+      if (t->type == task_type_pair) {
+
+        /* Do ci */
+        if (ci_active) {
+          /* stars for ci */
+          atomic_or(&ci->stars.requires_sorts, 1 << t->flags);
+          ci->stars.dx_max_sort_old = ci->stars.dx_max_sort;
+
+          /* hydro for cj */
+          atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
+          cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
+
+          /* Activate the drift tasks. */
+          if (ci_nodeID == nodeID) cell_activate_drift_spart(ci, s);
+          if (cj_nodeID == nodeID) cell_activate_drift_part(cj, s);
+
+          /* Check the sorts and activate them if needed. */
+          cell_activate_stars_sorts(ci, t->flags, s);
+          cell_activate_hydro_sorts(cj, t->flags, s);
+        }
+
+        /* Do cj */
+        if (cj_active) {
+          /* hydro for ci */
+          atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
+          ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+
+          /* stars for cj */
+          atomic_or(&cj->stars.requires_sorts, 1 << t->flags);
+          cj->stars.dx_max_sort_old = cj->stars.dx_max_sort;
+
+          /* Activate the drift tasks. */
+          if (cj_nodeID == nodeID) cell_activate_drift_spart(cj, s);
+          if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+
+          /* Check the sorts and activate them if needed. */
+          cell_activate_hydro_sorts(ci, t->flags, s);
+          cell_activate_stars_sorts(cj, t->flags, s);
+        }
+      }
+
+      else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
+        cell_activate_subcell_stars_tasks(ci, cj, s);
+      }
+    }
+
+    /* Only interested in pair interactions as of here. */
+    if (t->type == task_type_pair || t->type == task_type_sub_pair) {
+
+      /* Check whether there was too much particle motion, i.e. the
+         cell neighbour conditions were violated. */
+      if (cell_need_rebuild_for_stars_pair(ci, cj)) rebuild = 1;
+      if (cell_need_rebuild_for_stars_pair(cj, ci)) rebuild = 1;
+
+#ifdef WITH_MPI
+      /* Activate the send/recv tasks. */
+      if (ci_nodeID != nodeID) {
+
+        if (cj_active) {
+          scheduler_activate(s, ci->mpi.hydro.recv_xv);
+          scheduler_activate(s, ci->mpi.hydro.recv_rho);
+
+          /* If the local cell is active, more stuff will be needed. */
+          scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+          cell_activate_drift_spart(cj, s);
+
+          /* If the local cell is active, send its ti_end values. */
+          scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+        }
+
+        if (ci_active) {
+          scheduler_activate(s, ci->mpi.stars.recv);
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          scheduler_activate(s, ci->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
+          scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
+
+          /* Drift the cell which will be sent; note that not all sent
+             particles will be drifted, only those that are needed. */
+          cell_activate_drift_part(cj, s);
+        }
+
+      } else if (cj_nodeID != nodeID) {
+
+        /* If the local cell is active, receive data from the foreign cell. */
+        if (ci_active) {
+          scheduler_activate(s, cj->mpi.hydro.recv_xv);
+          scheduler_activate(s, cj->mpi.hydro.recv_rho);
+
+          /* If the local cell is active, more stuff will be needed. */
+          scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+          cell_activate_drift_spart(ci, s);
+
+          /* If the local cell is active, send its ti_end values. */
+          scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+        }
+
+        if (cj_active) {
+          scheduler_activate(s, cj->mpi.stars.recv);
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          scheduler_activate(s, cj->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
+          scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
+
+          /* Drift the cell which will be sent; note that not all sent
+             particles will be drifted, only those that are needed. */
+          cell_activate_drift_part(ci, s);
+        }
+      }
+#endif
+    }
+  }
+
+  /* Un-skip the feedback tasks involved with this cell. */
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
+    struct task *t = l->t;
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+    const int ci_active = cell_is_active_stars(ci, e);
+    const int cj_active = (cj != NULL) ? cell_is_active_stars(cj, e) : 0;
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
+
+    if ((ci_active && cj_nodeID == nodeID) ||
+        (cj_active && ci_nodeID == nodeID)) {
+      scheduler_activate(s, t);
+
+      /* Nothing more to do here, all drifts and sorts activated above */
+    }
+  }
+
+  /* Unskip all the other task types. */
+  if (c->nodeID == nodeID && cell_is_active_stars(c, e)) {
+
+    if (c->stars.ghost != NULL) scheduler_activate(s, c->stars.ghost);
+    if (c->stars.stars_in != NULL) scheduler_activate(s, c->stars.stars_in);
+    if (c->stars.stars_out != NULL) scheduler_activate(s, c->stars.stars_out);
+    if (c->logger != NULL) scheduler_activate(s, c->logger);
+  }
+
+  return rebuild;
+}
+
+/**
+ * @brief Set the super-cell pointers for all cells in a hierarchy.
+ *
  * @param c The top-level #cell to play with.
  * @param super Pointer to the deepest cell with tasks in this part of the tree.
+ * @param with_hydro Are we running with hydrodynamics on?
+ * @param with_grav Are we running with gravity on?
  */
-void cell_set_super(struct cell *c, struct cell *super) {
+void cell_set_super(struct cell *c, struct cell *super, const int with_hydro,
+                    const int with_grav) {
 
-  /* Are we in a cell with some kind of self/pair task ? */
-  if (super == NULL && c->nr_tasks > 0) super = c;
+  /* Are we in a cell which is either the hydro or gravity super? */
+  if (super == NULL && ((with_hydro && c->hydro.super != NULL) ||
+                        (with_grav && c->grav.super != NULL)))
+    super = c;
 
   /* Set the super-cell */
   c->super = super;
@@ -2426,7 +3613,8 @@ void cell_set_super(struct cell *c, struct cell *super) {
   /* Recurse */
   if (c->split)
     for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL) cell_set_super(c->progeny[k], super);
+      if (c->progeny[k] != NULL)
+        cell_set_super(c->progeny[k], super, with_hydro, with_grav);
 }
 
 /**
@@ -2439,10 +3627,10 @@ void cell_set_super(struct cell *c, struct cell *super) {
 void cell_set_super_hydro(struct cell *c, struct cell *super_hydro) {
 
   /* Are we in a cell with some kind of self/pair task ? */
-  if (super_hydro == NULL && c->density != NULL) super_hydro = c;
+  if (super_hydro == NULL && c->hydro.density != NULL) super_hydro = c;
 
   /* Set the super-cell */
-  c->super_hydro = super_hydro;
+  c->hydro.super = super_hydro;
 
   /* Recurse */
   if (c->split)
@@ -2461,10 +3649,11 @@ void cell_set_super_hydro(struct cell *c, struct cell *super_hydro) {
 void cell_set_super_gravity(struct cell *c, struct cell *super_gravity) {
 
   /* Are we in a cell with some kind of self/pair task ? */
-  if (super_gravity == NULL && c->grav != NULL) super_gravity = c;
+  if (super_gravity == NULL && (c->grav.grav != NULL || c->grav.mm != NULL))
+    super_gravity = c;
 
   /* Set the super-cell */
-  c->super_gravity = super_gravity;
+  c->grav.super = super_gravity;
 
   /* Recurse */
   if (c->split)
@@ -2484,24 +3673,26 @@ void cell_set_super_mapper(void *map_data, int num_elements, void *extra_data) {
 
   const struct engine *e = (const struct engine *)extra_data;
 
+  const int with_hydro = (e->policy & engine_policy_hydro);
+  const int with_grav = (e->policy & engine_policy_self_gravity) ||
+                        (e->policy & engine_policy_external_gravity);
+
   for (int ind = 0; ind < num_elements; ind++) {
     struct cell *c = &((struct cell *)map_data)[ind];
 
     /* All top-level cells get an MPI tag. */
 #ifdef WITH_MPI
-    if (c->tag < 0 && c->sendto) cell_tag(c);
+    cell_ensure_tagged(c);
 #endif
 
     /* Super-pointer for hydro */
-    if (e->policy & engine_policy_hydro) cell_set_super_hydro(c, NULL);
+    if (with_hydro) cell_set_super_hydro(c, NULL);
 
     /* Super-pointer for gravity */
-    if ((e->policy & engine_policy_self_gravity) ||
-        (e->policy & engine_policy_external_gravity))
-      cell_set_super_gravity(c, NULL);
+    if (with_grav) cell_set_super_gravity(c, NULL);
 
     /* Super-pointer for common operations */
-    cell_set_super(c, NULL);
+    cell_set_super(c, NULL, with_hydro, with_grav);
   }
 }
 
@@ -2516,7 +3707,7 @@ void cell_set_super_mapper(void *map_data, int num_elements, void *extra_data) {
 int cell_has_tasks(struct cell *c) {
 
 #ifdef WITH_MPI
-  if (c->timestep != NULL || c->recv_ti != NULL) return 1;
+  if (c->timestep != NULL || c->mpi.recv_ti != NULL) return 1;
 #else
   if (c->timestep != NULL) return 1;
 #endif
@@ -2540,18 +3731,22 @@ int cell_has_tasks(struct cell *c) {
  */
 void cell_drift_part(struct cell *c, const struct engine *e, int force) {
 
+  const int periodic = e->s->periodic;
+  const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
   const float hydro_h_max = e->hydro_properties->h_max;
-  const integertime_t ti_old_part = c->ti_old_part;
+  const float hydro_h_min = e->hydro_properties->h_min;
+  const integertime_t ti_old_part = c->hydro.ti_old_part;
   const integertime_t ti_current = e->ti_current;
-  struct part *const parts = c->parts;
-  struct xpart *const xparts = c->xparts;
+  struct part *const parts = c->hydro.parts;
+  struct xpart *const xparts = c->hydro.xparts;
 
   float dx_max = 0.f, dx2_max = 0.f;
   float dx_max_sort = 0.0f, dx2_max_sort = 0.f;
   float cell_h_max = 0.f;
 
   /* Drift irrespective of cell flags? */
-  force |= c->do_drift;
+  force |= c->hydro.do_drift;
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that we only drift local cells. */
@@ -2561,8 +3756,23 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
   if (ti_current < ti_old_part) error("Attempt to drift to the past");
 #endif
 
+  /* Early abort? */
+  if (c->hydro.count == 0) {
+
+    /* Clear the drift flags. */
+    c->hydro.do_drift = 0;
+    c->hydro.do_sub_drift = 0;
+
+    /* Update the time of the last drift */
+    c->hydro.ti_old_part = ti_current;
+
+    return;
+  }
+
+  /* Ok, we have some particles somewhere in the hierarchy to drift */
+
   /* Are we not in a leaf ? */
-  if (c->split && (force || c->do_sub_drift)) {
+  if (c->split && (force || c->hydro.do_sub_drift)) {
 
     /* Loop over the progeny and collect their data. */
     for (int k = 0; k < 8; k++) {
@@ -2573,25 +3783,25 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
         cell_drift_part(cp, e, force);
 
         /* Update */
-        dx_max = max(dx_max, cp->dx_max_part);
-        dx_max_sort = max(dx_max_sort, cp->dx_max_sort);
-        cell_h_max = max(cell_h_max, cp->h_max);
+        dx_max = max(dx_max, cp->hydro.dx_max_part);
+        dx_max_sort = max(dx_max_sort, cp->hydro.dx_max_sort);
+        cell_h_max = max(cell_h_max, cp->hydro.h_max);
       }
     }
 
     /* Store the values */
-    c->h_max = cell_h_max;
-    c->dx_max_part = dx_max;
-    c->dx_max_sort = dx_max_sort;
+    c->hydro.h_max = cell_h_max;
+    c->hydro.dx_max_part = dx_max;
+    c->hydro.dx_max_sort = dx_max_sort;
 
     /* Update the time of the last drift */
-    c->ti_old_part = ti_current;
+    c->hydro.ti_old_part = ti_current;
 
   } else if (!c->split && force && ti_current > ti_old_part) {
 
     /* Drift from the last time the cell was drifted to the current time */
     double dt_drift, dt_kick_grav, dt_kick_hydro, dt_therm;
-    if (e->policy & engine_policy_cosmology) {
+    if (with_cosmology) {
       dt_drift =
           cosmology_get_drift_factor(e->cosmology, ti_old_part, ti_current);
       dt_kick_grav =
@@ -2608,58 +3818,59 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
     }
 
     /* Loop over all the gas particles in the cell */
-    const size_t nr_parts = c->count;
+    const size_t nr_parts = c->hydro.count;
     for (size_t k = 0; k < nr_parts; k++) {
 
       /* Get a handle on the part. */
       struct part *const p = &parts[k];
       struct xpart *const xp = &xparts[k];
 
+      /* Ignore inhibited particles */
+      if (part_is_inhibited(p, e)) continue;
+
       /* Drift... */
       drift_part(p, xp, dt_drift, dt_kick_hydro, dt_kick_grav, dt_therm,
                  ti_old_part, ti_current);
 
+      /* Update the tracers properties */
+      tracers_after_drift(p, xp, e->internal_units, e->physical_constants,
+                          with_cosmology, e->cosmology, e->hydro_properties,
+                          e->cooling_func, e->time);
+
 #ifdef SWIFT_DEBUG_CHECKS
       /* Make sure the particle does not drift by more than a box length. */
-      if (fabsf(xp->v_full[0] * dt_drift) > e->s->dim[0] ||
-          fabsf(xp->v_full[1] * dt_drift) > e->s->dim[1] ||
-          fabsf(xp->v_full[2] * dt_drift) > e->s->dim[2]) {
+      if (fabs(xp->v_full[0] * dt_drift) > e->s->dim[0] ||
+          fabs(xp->v_full[1] * dt_drift) > e->s->dim[1] ||
+          fabs(xp->v_full[2] * dt_drift) > e->s->dim[2]) {
         error("Particle drifts by more than a box length!");
       }
 #endif
 
-#ifdef PLANETARY_SPH
-      /* Remove particles that cross the non-periodic box edge */
-      if (!(e->s->periodic)) {
-        for (int i = 0; i < 3; i++) {
-          if ((p->x[i] - xp->v_full[i] * dt_drift > e->s->dim[i]) ||
-              (p->x[i] - xp->v_full[i] * dt_drift < 0.f) ||
-              ((p->mass != 0.f) && ((p->x[i] < 0.01f * e->s->dim[i]) ||
-                                    (p->x[i] > 0.99f * e->s->dim[i])))) {
-            /* (TEMPORARY) Crudely stop the particle manually */
-            message(
-                "Particle %lld hit a box edge. \n"
-                "  pos=%.4e %.4e %.4e  vel=%.2e %.2e %.2e",
-                p->id, p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2]);
-            for (int j = 0; j < 3; j++) {
-              p->v[j] = 0.f;
-              p->gpart->v_full[j] = 0.f;
-              xp->v_full[j] = 0.f;
-            }
-            p->h = hydro_h_max;
-            p->time_bin = time_bin_inhibited;
-            p->gpart->time_bin = time_bin_inhibited;
-            hydro_part_has_no_neighbours(p, xp, e->cosmology);
-            p->mass = 0.f;
-            p->gpart->mass = 0.f;
-            break;
-          }
+      /* In non-periodic BC runs, remove particles that crossed the border */
+      if (!periodic) {
+
+        /* Did the particle leave the box?  */
+        if ((p->x[0] > dim[0]) || (p->x[0] < 0.) ||  // x
+            (p->x[1] > dim[1]) || (p->x[1] < 0.) ||  // y
+            (p->x[2] > dim[2]) || (p->x[2] < 0.)) {  // z
+
+          /* One last action before death? */
+          hydro_remove_part(p, xp);
+
+          /* Remove the particle entirely */
+          struct gpart *gp = p->gpart;
+          cell_remove_part(e, c, p, xp);
+
+          /* and its gravity friend */
+          if (gp != NULL) cell_remove_gpart(e, c, gp);
+
+          continue;
         }
       }
-#endif
 
       /* Limit h to within the allowed range */
       p->h = min(p->h, hydro_h_max);
+      p->h = max(p->h, hydro_h_min);
 
       /* Compute (square of) motion since last cell construction */
       const float dx2 = xp->x_diff[0] * xp->x_diff[0] +
@@ -2678,6 +3889,9 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
       if (part_is_active(p, e)) {
         hydro_init_part(p, &e->s->hs);
         chemistry_init_part(p, e->chemistry);
+        tracers_after_init(p, xp, e->internal_units, e->physical_constants,
+                           with_cosmology, e->cosmology, e->hydro_properties,
+                           e->cooling_func, e->time);
       }
     }
 
@@ -2686,17 +3900,17 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
     dx_max_sort = sqrtf(dx2_max_sort);
 
     /* Store the values */
-    c->h_max = cell_h_max;
-    c->dx_max_part = dx_max;
-    c->dx_max_sort = dx_max_sort;
+    c->hydro.h_max = cell_h_max;
+    c->hydro.dx_max_part = dx_max;
+    c->hydro.dx_max_sort = dx_max_sort;
 
     /* Update the time of the last drift */
-    c->ti_old_part = ti_current;
+    c->hydro.ti_old_part = ti_current;
   }
 
   /* Clear the drift flags. */
-  c->do_drift = 0;
-  c->do_sub_drift = 0;
+  c->hydro.do_drift = 0;
+  c->hydro.do_sub_drift = 0;
 }
 
 /**
@@ -2708,13 +3922,15 @@ void cell_drift_part(struct cell *c, const struct engine *e, int force) {
  */
 void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
 
-  const integertime_t ti_old_gpart = c->ti_old_gpart;
+  const int periodic = e->s->periodic;
+  const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const integertime_t ti_old_gpart = c->grav.ti_old_part;
   const integertime_t ti_current = e->ti_current;
-  struct gpart *const gparts = c->gparts;
-  struct spart *const sparts = c->sparts;
+  struct gpart *const gparts = c->grav.parts;
 
   /* Drift irrespective of cell flags? */
-  force |= c->do_grav_drift;
+  force |= c->grav.do_drift;
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that we only drift local cells. */
@@ -2724,8 +3940,23 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
   if (ti_current < ti_old_gpart) error("Attempt to drift to the past");
 #endif
 
+  /* Early abort? */
+  if (c->grav.count == 0) {
+
+    /* Clear the drift flags. */
+    c->grav.do_drift = 0;
+    c->grav.do_sub_drift = 0;
+
+    /* Update the time of the last drift */
+    c->grav.ti_old_part = ti_current;
+
+    return;
+  }
+
+  /* Ok, we have some particles somewhere in the hierarchy to drift */
+
   /* Are we not in a leaf ? */
-  if (c->split && (force || c->do_grav_sub_drift)) {
+  if (c->split && (force || c->grav.do_sub_drift)) {
 
     /* Loop over the progeny and collect their data. */
     for (int k = 0; k < 8; k++) {
@@ -2738,146 +3969,346 @@ void cell_drift_gpart(struct cell *c, const struct engine *e, int force) {
     }
 
     /* Update the time of the last drift */
-    c->ti_old_gpart = ti_current;
+    c->grav.ti_old_part = ti_current;
 
   } else if (!c->split && force && ti_current > ti_old_gpart) {
 
     /* Drift from the last time the cell was drifted to the current time */
     double dt_drift;
-    if (e->policy & engine_policy_cosmology)
+    if (with_cosmology) {
       dt_drift =
           cosmology_get_drift_factor(e->cosmology, ti_old_gpart, ti_current);
-    else
+    } else {
       dt_drift = (ti_current - ti_old_gpart) * e->time_base;
+    }
 
     /* Loop over all the g-particles in the cell */
-    const size_t nr_gparts = c->gcount;
+    const size_t nr_gparts = c->grav.count;
     for (size_t k = 0; k < nr_gparts; k++) {
 
       /* Get a handle on the gpart. */
       struct gpart *const gp = &gparts[k];
 
+      /* Ignore inhibited particles */
+      if (gpart_is_inhibited(gp, e)) continue;
+
       /* Drift... */
       drift_gpart(gp, dt_drift, ti_old_gpart, ti_current);
 
-#ifdef PLANETARY_SPH
-      /* Remove particles that cross the non-periodic box edge */
-      if (!(e->s->periodic)) {
-        for (int i = 0; i < 3; i++) {
-          if ((gp->x[i] - gp->v_full[i] * dt_drift > e->s->dim[i]) ||
-              (gp->x[i] - gp->v_full[i] * dt_drift < 0.f) ||
-              ((gp->mass != 0.f) && ((gp->x[i] < 0.01f * e->s->dim[i]) ||
-                                     (gp->x[i] > 0.99f * e->s->dim[i])))) {
-            /* (TEMPORARY) Crudely stop the particle manually */
-            for (int j = 0; j < 3; j++) {
-              gp->v_full[j] = 0.f;
-            }
-            gp->time_bin = time_bin_inhibited;
-            gp->mass = 0.f;
-            break;
-          }
-        }
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Make sure the particle does not drift by more than a box length. */
+      if (fabs(gp->v_full[0] * dt_drift) > e->s->dim[0] ||
+          fabs(gp->v_full[1] * dt_drift) > e->s->dim[1] ||
+          fabs(gp->v_full[2] * dt_drift) > e->s->dim[2]) {
+        error("Particle drifts by more than a box length!");
       }
 #endif
 
-      /* Init gravity force fields. */
-      if (gpart_is_active(gp, e)) {
-        gravity_init_gpart(gp);
-      }
-    }
+      /* In non-periodic BC runs, remove particles that crossed the border */
+      if (!periodic) {
 
-    /* Loop over all the star particles in the cell */
-    const size_t nr_sparts = c->scount;
-    for (size_t k = 0; k < nr_sparts; k++) {
+        /* Did the particle leave the box?  */
+        if ((gp->x[0] > dim[0]) || (gp->x[0] < 0.) ||  // x
+            (gp->x[1] > dim[1]) || (gp->x[1] < 0.) ||  // y
+            (gp->x[2] > dim[2]) || (gp->x[2] < 0.)) {  // z
 
-      /* Get a handle on the spart. */
-      struct spart *const sp = &sparts[k];
+          /* Remove the particle entirely */
+          if (gp->type == swift_type_dark_matter) cell_remove_gpart(e, c, gp);
 
-      /* Drift... */
-      drift_spart(sp, dt_drift, ti_old_gpart, ti_current);
+          continue;
+        }
+      }
 
-      /* Note: no need to compute dx_max as all spart have a gpart */
+      /* Init gravity force fields. */
+      if (gpart_is_active(gp, e)) {
+        gravity_init_gpart(gp);
+      }
     }
 
     /* Update the time of the last drift */
-    c->ti_old_gpart = ti_current;
+    c->grav.ti_old_part = ti_current;
   }
 
   /* Clear the drift flags. */
-  c->do_grav_drift = 0;
-  c->do_grav_sub_drift = 0;
+  c->grav.do_drift = 0;
+  c->grav.do_sub_drift = 0;
 }
 
 /**
- * @brief Recursively drifts all multipoles in a cell hierarchy.
+ * @brief Recursively drifts the #spart in a cell hierarchy.
  *
  * @param c The #cell.
  * @param e The #engine (to get ti_current).
+ * @param force Drift the particles irrespective of the #cell flags.
  */
-void cell_drift_all_multipoles(struct cell *c, const struct engine *e) {
-
-  const integertime_t ti_old_multipole = c->ti_old_multipole;
+void cell_drift_spart(struct cell *c, const struct engine *e, int force) {
+
+  const int periodic = e->s->periodic;
+  const double dim[3] = {e->s->dim[0], e->s->dim[1], e->s->dim[2]};
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const float stars_h_max = e->hydro_properties->h_max;
+  const float stars_h_min = e->hydro_properties->h_min;
+  const integertime_t ti_old_spart = c->stars.ti_old_part;
   const integertime_t ti_current = e->ti_current;
+  struct spart *const sparts = c->stars.parts;
+
+  float dx_max = 0.f, dx2_max = 0.f;
+  float dx_max_sort = 0.0f, dx2_max_sort = 0.f;
+  float cell_h_max = 0.f;
+
+  /* Drift irrespective of cell flags? */
+  force |= c->stars.do_drift;
 
 #ifdef SWIFT_DEBUG_CHECKS
+  /* Check that we only drift local cells. */
+  if (c->nodeID != engine_rank) error("Drifting a foreign cell is nope.");
+
   /* Check that we are actually going to move forward. */
-  if (ti_current < ti_old_multipole) error("Attempt to drift to the past");
+  if (ti_current < ti_old_spart) error("Attempt to drift to the past");
 #endif
 
-  /* Drift from the last time the cell was drifted to the current time */
-  double dt_drift;
-  if (e->policy & engine_policy_cosmology)
-    dt_drift =
-        cosmology_get_drift_factor(e->cosmology, ti_old_multipole, ti_current);
-  else
-    dt_drift = (ti_current - ti_old_multipole) * e->time_base;
+  /* Early abort? */
+  if (c->stars.count == 0) {
 
-  /* Drift the multipole */
-  if (ti_current > ti_old_multipole) gravity_drift(c->multipole, dt_drift);
+    /* Clear the drift flags. */
+    c->stars.do_drift = 0;
+    c->stars.do_sub_drift = 0;
 
-  /* Are we not in a leaf ? */
-  if (c->split) {
+    /* Update the time of the last drift */
+    c->stars.ti_old_part = ti_current;
 
-    /* Loop over the progeny and recurse. */
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL) cell_drift_all_multipoles(c->progeny[k], e);
+    return;
   }
 
-  /* Update the time of the last drift */
-  c->ti_old_multipole = ti_current;
-}
+  /* Ok, we have some particles somewhere in the hierarchy to drift */
 
-/**
- * @brief Drifts the multipole of a cell to the current time.
- *
- * Only drifts the multipole at this level. Multipoles deeper in the
- * tree are not updated.
- *
- * @param c The #cell.
- * @param e The #engine (to get ti_current).
- */
-void cell_drift_multipole(struct cell *c, const struct engine *e) {
+  /* Are we not in a leaf ? */
+  if (c->split && (force || c->stars.do_sub_drift)) {
 
-  const integertime_t ti_old_multipole = c->ti_old_multipole;
-  const integertime_t ti_current = e->ti_current;
+    /* Loop over the progeny and collect their data. */
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL) {
+        struct cell *cp = c->progeny[k];
 
-#ifdef SWIFT_DEBUG_CHECKS
-  /* Check that we are actually going to move forward. */
-  if (ti_current < ti_old_multipole) error("Attempt to drift to the past");
-#endif
+        /* Recurse */
+        cell_drift_spart(cp, e, force);
 
-  /* Drift from the last time the cell was drifted to the current time */
-  double dt_drift;
-  if (e->policy & engine_policy_cosmology)
-    dt_drift =
-        cosmology_get_drift_factor(e->cosmology, ti_old_multipole, ti_current);
+        /* Update */
+        dx_max = max(dx_max, cp->stars.dx_max_part);
+        dx_max_sort = max(dx_max_sort, cp->stars.dx_max_sort);
+        cell_h_max = max(cell_h_max, cp->stars.h_max);
+      }
+    }
+
+    /* Store the values */
+    c->stars.h_max = cell_h_max;
+    c->stars.dx_max_part = dx_max;
+    c->stars.dx_max_sort = dx_max_sort;
+
+    /* Update the time of the last drift */
+    c->stars.ti_old_part = ti_current;
+
+  } else if (!c->split && force && ti_current > ti_old_spart) {
+
+    /* Drift from the last time the cell was drifted to the current time */
+    double dt_drift;
+    if (with_cosmology) {
+      dt_drift =
+          cosmology_get_drift_factor(e->cosmology, ti_old_spart, ti_current);
+    } else {
+      dt_drift = (ti_current - ti_old_spart) * e->time_base;
+    }
+
+    /* Loop over all the star particles in the cell */
+    const size_t nr_sparts = c->stars.count;
+    for (size_t k = 0; k < nr_sparts; k++) {
+
+      /* Get a handle on the spart. */
+      struct spart *const sp = &sparts[k];
+
+      /* Ignore inhibited particles */
+      if (spart_is_inhibited(sp, e)) continue;
+
+      /* Drift... */
+      drift_spart(sp, dt_drift, ti_old_spart, ti_current);
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Make sure the particle does not drift by more than a box length. */
+      if (fabs(sp->v[0] * dt_drift) > e->s->dim[0] ||
+          fabs(sp->v[1] * dt_drift) > e->s->dim[1] ||
+          fabs(sp->v[2] * dt_drift) > e->s->dim[2]) {
+        error("Particle drifts by more than a box length!");
+      }
+#endif
+
+      /* In non-periodic BC runs, remove particles that crossed the border */
+      if (!periodic) {
+
+        /* Did the particle leave the box?  */
+        if ((sp->x[0] > dim[0]) || (sp->x[0] < 0.) ||  // x
+            (sp->x[1] > dim[1]) || (sp->x[1] < 0.) ||  // y
+            (sp->x[2] > dim[2]) || (sp->x[2] < 0.)) {  // z
+
+          /* Remove the particle entirely */
+          struct gpart *gp = sp->gpart;
+          cell_remove_spart(e, c, sp);
+
+          /* and its gravity friend */
+          cell_remove_gpart(e, c, gp);
+
+          continue;
+        }
+      }
+
+      /* Limit h to within the allowed range */
+      sp->h = min(sp->h, stars_h_max);
+      sp->h = max(sp->h, stars_h_min);
+
+      /* Compute (square of) motion since last cell construction */
+      const float dx2 = sp->x_diff[0] * sp->x_diff[0] +
+                        sp->x_diff[1] * sp->x_diff[1] +
+                        sp->x_diff[2] * sp->x_diff[2];
+      dx2_max = max(dx2_max, dx2);
+
+      const float dx2_sort = sp->x_diff_sort[0] * sp->x_diff_sort[0] +
+                             sp->x_diff_sort[1] * sp->x_diff_sort[1] +
+                             sp->x_diff_sort[2] * sp->x_diff_sort[2];
+
+      dx2_max_sort = max(dx2_max_sort, dx2_sort);
+
+      /* Maximal smoothing length */
+      cell_h_max = max(cell_h_max, sp->h);
+
+      /* Get ready for a density calculation */
+      if (spart_is_active(sp, e)) {
+        stars_init_spart(sp);
+      }
+    }
+
+    /* Now, get the maximal particle motion from its square */
+    dx_max = sqrtf(dx2_max);
+    dx_max_sort = sqrtf(dx2_max_sort);
+
+    /* Store the values */
+    c->stars.h_max = cell_h_max;
+    c->stars.dx_max_part = dx_max;
+    c->stars.dx_max_sort = dx_max_sort;
+
+    /* Update the time of the last drift */
+    c->stars.ti_old_part = ti_current;
+  }
+
+  /* Clear the drift flags. */
+  c->stars.do_drift = 0;
+  c->stars.do_sub_drift = 0;
+}
+
+/**
+ * @brief Recursively drifts all multipoles in a cell hierarchy.
+ *
+ * @param c The #cell.
+ * @param e The #engine (to get ti_current).
+ */
+void cell_drift_all_multipoles(struct cell *c, const struct engine *e) {
+
+  const integertime_t ti_old_multipole = c->grav.ti_old_multipole;
+  const integertime_t ti_current = e->ti_current;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check that we are actually going to move forward. */
+  if (ti_current < ti_old_multipole) error("Attempt to drift to the past");
+#endif
+
+  /* Drift from the last time the cell was drifted to the current time */
+  double dt_drift;
+  if (e->policy & engine_policy_cosmology)
+    dt_drift =
+        cosmology_get_drift_factor(e->cosmology, ti_old_multipole, ti_current);
+  else
+    dt_drift = (ti_current - ti_old_multipole) * e->time_base;
+
+  /* Drift the multipole */
+  if (ti_current > ti_old_multipole) gravity_drift(c->grav.multipole, dt_drift);
+
+  /* Are we not in a leaf ? */
+  if (c->split) {
+
+    /* Loop over the progeny and recurse. */
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL) cell_drift_all_multipoles(c->progeny[k], e);
+  }
+
+  /* Update the time of the last drift */
+  c->grav.ti_old_multipole = ti_current;
+}
+
+/**
+ * @brief Drifts the multipole of a cell to the current time.
+ *
+ * Only drifts the multipole at this level. Multipoles deeper in the
+ * tree are not updated.
+ *
+ * @param c The #cell.
+ * @param e The #engine (to get ti_current).
+ */
+void cell_drift_multipole(struct cell *c, const struct engine *e) {
+
+  const integertime_t ti_old_multipole = c->grav.ti_old_multipole;
+  const integertime_t ti_current = e->ti_current;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check that we are actually going to move forward. */
+  if (ti_current < ti_old_multipole) error("Attempt to drift to the past");
+#endif
+
+  /* Drift from the last time the cell was drifted to the current time */
+  double dt_drift;
+  if (e->policy & engine_policy_cosmology)
+    dt_drift =
+        cosmology_get_drift_factor(e->cosmology, ti_old_multipole, ti_current);
   else
     dt_drift = (ti_current - ti_old_multipole) * e->time_base;
 
-  if (ti_current > ti_old_multipole) gravity_drift(c->multipole, dt_drift);
+  if (ti_current > ti_old_multipole) gravity_drift(c->grav.multipole, dt_drift);
 
   /* Update the time of the last drift */
-  c->ti_old_multipole = ti_current;
+  c->grav.ti_old_multipole = ti_current;
+}
+
+/**
+ * @brief Resets all the sorting properties for the stars in a given cell
+ * hierarchy.
+ *
+ * @param c The #cell to clean.
+ * @param is_super Is this a super-cell?
+ */
+void cell_clear_stars_sort_flags(struct cell *c, const int is_super) {
+
+  /* Recurse if possible */
+  if (c->split) {
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        cell_clear_stars_sort_flags(c->progeny[k], /*is_super=*/0);
+  }
+
+  /* Free the sorted array at the level where it was allocated */
+  if (is_super) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (c != c->hydro.super) error("Cell is not a super-cell!!!");
+#endif
+
+    for (int i = 0; i < 13; i++) {
+      free(c->stars.sort[i]);
+    }
+  }
+
+  /* Indicate that the cell is not sorted and cancel the pointer sorting arrays.
+   */
+  c->stars.sorted = 0;
+  for (int i = 0; i < 13; i++) {
+    c->stars.sort[i] = NULL;
+  }
 }
 
 /**
@@ -2886,7 +4317,8 @@ void cell_drift_multipole(struct cell *c, const struct engine *e) {
 void cell_check_timesteps(struct cell *c) {
 #ifdef SWIFT_DEBUG_CHECKS
 
-  if (c->ti_hydro_end_min == 0 && c->ti_gravity_end_min == 0 && c->nr_tasks > 0)
+  if (c->hydro.ti_end_min == 0 && c->grav.ti_end_min == 0 &&
+      c->stars.ti_end_min == 0 && c->nr_tasks > 0)
     error("Cell without assigned time-step");
 
   if (c->split) {
@@ -2895,8 +4327,8 @@ void cell_check_timesteps(struct cell *c) {
   } else {
 
     if (c->nodeID == engine_rank)
-      for (int i = 0; i < c->count; ++i)
-        if (c->parts[i].time_bin == 0)
+      for (int i = 0; i < c->hydro.count; ++i)
+        if (c->hydro.parts[i].time_bin == 0)
           error("Particle without assigned time-bin");
   }
 #else
@@ -2904,6 +4336,567 @@ void cell_check_timesteps(struct cell *c) {
 #endif
 }
 
+void cell_check_spart_pos(const struct cell *c,
+                          const struct spart *global_sparts) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Recurse */
+  if (c->split) {
+    for (int k = 0; k < 8; ++k)
+      if (c->progeny[k] != NULL)
+        cell_check_spart_pos(c->progeny[k], global_sparts);
+  }
+
+  struct spart *sparts = c->stars.parts;
+  const int count = c->stars.count;
+  for (int i = 0; i < count; ++i) {
+
+    const struct spart *sp = &sparts[i];
+    if ((sp->x[0] < c->loc[0] / space_stretch) ||
+        (sp->x[1] < c->loc[1] / space_stretch) ||
+        (sp->x[2] < c->loc[2] / space_stretch) ||
+        (sp->x[0] >= (c->loc[0] + c->width[0]) * space_stretch) ||
+        (sp->x[1] >= (c->loc[1] + c->width[1]) * space_stretch) ||
+        (sp->x[2] >= (c->loc[2] + c->width[2]) * space_stretch))
+      error("spart not in its cell!");
+
+    if (sp->time_bin != time_bin_not_created &&
+        sp->time_bin != time_bin_inhibited) {
+
+      const struct gpart *gp = sp->gpart;
+      if (gp == NULL && sp->time_bin != time_bin_not_created)
+        error("Unlinked spart!");
+
+      if (&global_sparts[-gp->id_or_neg_offset] != sp)
+        error("Incorrectly linked spart!");
+    }
+  }
+
+#else
+  error("Calling a degugging function outside debugging mode.");
+#endif
+}
+
+/**
+ * @brief Recursively update the pointer and counter for #spart after the
+ * addition of a new particle.
+ *
+ * @param c The cell we are working on.
+ * @param progeny_list The list of the progeny index at each level for the
+ * leaf-cell where the particle was added.
+ * @param main_branch Are we in a cell directly above the leaf where the new
+ * particle was added?
+ */
+void cell_recursively_shift_sparts(struct cell *c,
+                                   const int progeny_list[space_cell_maxdepth],
+                                   const int main_branch) {
+  if (c->split) {
+
+    /* No need to recurse in progenies located before the insertion point */
+    const int first_progeny = main_branch ? progeny_list[(int)c->depth] : 0;
+
+    for (int k = first_progeny; k < 8; ++k) {
+
+      if (c->progeny[k] != NULL)
+        cell_recursively_shift_sparts(c->progeny[k], progeny_list,
+                                      main_branch && (k == first_progeny));
+    }
+  }
+
+  /* When directly above the leaf with the new particle: increase the particle
+   * count */
+  /* When after the leaf with the new particle: shift by one position */
+  if (main_branch)
+    c->stars.count++;
+  else
+    c->stars.parts++;
+}
+
+/**
+ * @brief "Add" a #spart in a given #cell.
+ *
+ * This function will add a #spart at the start of the current cell's array by
+ * shifting all the #spart in the top-level cell by one position. All the
+ * pointers and cell counts are updated accordingly.
+ *
+ * @param e The #engine.
+ * @param c The leaf-cell in which to add the #spart.
+ *
+ * @return A pointer to the newly added #spart. The spart has been zeroed and
+ * given a position within the cell as well as set to the minimal active time
+ * bin.
+ */
+struct spart *cell_add_spart(struct engine *e, struct cell *const c) {
+
+  /* Perform some basic consistency checks */
+  if (c->nodeID != engine_rank) error("Adding spart on a foreign node");
+  if (c->grav.ti_old_part != e->ti_current) error("Undrifted cell!");
+  if (c->split) error("Addition of spart performed above the leaf level");
+
+  /* Progeny number at each level */
+  int progeny[space_cell_maxdepth];
+#ifdef SWIFT_DEBUG_CHECKS
+  for (int i = 0; i < space_cell_maxdepth; ++i) progeny[i] = -1;
+#endif
+
+  /* Get the top-level this leaf cell is in and compute the progeny indices at
+     each level */
+  struct cell *top = c;
+  while (top->parent != NULL) {
+    for (int k = 0; k < 8; ++k) {
+      if (top->parent->progeny[k] == top) {
+        progeny[(int)top->parent->depth] = k;
+      }
+    }
+    top = top->parent;
+  }
+
+  /* Are there any extra particles left? */
+  if (top->stars.count == top->stars.count_total - 1) {
+    message("We ran out of star particles!");
+    atomic_inc(&e->forcerebuild);
+    return NULL;
+  }
+
+  /* Number of particles to shift in order to get a free space. */
+  const size_t n_copy = &top->stars.parts[top->stars.count] - c->stars.parts;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->stars.parts + n_copy > top->stars.parts + top->stars.count)
+    error("Copying beyond the allowed range");
+#endif
+
+  if (n_copy > 0) {
+
+    // MATTHIEU: This can be improved. We don't need to copy everything, just
+    // need to swap a few particles.
+    memmove(&c->stars.parts[1], &c->stars.parts[0],
+            n_copy * sizeof(struct spart));
+
+    /* Update the gpart->spart links (shift by 1) */
+    for (size_t i = 0; i < n_copy; ++i) {
+#ifdef SWIFT_DEBUG_CHECKS
+      if (c->stars.parts[i + 1].gpart == NULL) {
+        error("Incorrectly linked spart!");
+      }
+#endif
+      c->stars.parts[i + 1].gpart->id_or_neg_offset--;
+    }
+  }
+
+  /* Recursively shift all the stars to get a free spot at the start of the
+   * current cell*/
+  cell_recursively_shift_sparts(top, progeny, /* main_branch=*/1);
+
+  /* We now have an empty spart as the first particle in that cell */
+  struct spart *sp = &c->stars.parts[0];
+  bzero(sp, sizeof(struct spart));
+
+  /* Give it a decent position */
+  sp->x[0] = c->loc[0] + 0.5 * c->width[0];
+  sp->x[1] = c->loc[1] + 0.5 * c->width[1];
+  sp->x[2] = c->loc[2] + 0.5 * c->width[2];
+
+  /* Set it to the current time-bin */
+  sp->time_bin = e->min_active_bin;
+
+  top = c;
+  while (top->parent != NULL) {
+    top->grav.ti_end_min = e->ti_current;
+    top = top->parent;
+  }
+  top->grav.ti_end_min = e->ti_current;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Specify it was drifted to this point */
+  sp->ti_drift = e->ti_current;
+#endif
+
+  /* Register that we used one of the free slots. */
+  const size_t one = 1;
+  atomic_sub(&e->s->nr_extra_sparts, one);
+
+  return sp;
+}
+
+/**
+ * @brief "Remove" a gas particle from the calculation.
+ *
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine running on this node.
+ * @param c The #cell from which to remove the particle.
+ * @param p The #part to remove.
+ * @param xp The extended data of the particle to remove.
+ */
+void cell_remove_part(const struct engine *e, struct cell *c, struct part *p,
+                      struct xpart *xp) {
+
+  /* Quick cross-check */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  /* Mark the particle as inhibited */
+  p->time_bin = time_bin_inhibited;
+
+  /* Mark the gpart as inhibited and stand-alone */
+  if (p->gpart) {
+    p->gpart->time_bin = time_bin_inhibited;
+    p->gpart->id_or_neg_offset = p->id;
+    p->gpart->type = swift_type_dark_matter;
+  }
+
+  /* Un-link the part */
+  p->gpart = NULL;
+}
+
+/**
+ * @brief "Remove" a gravity particle from the calculation.
+ *
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine running on this node.
+ * @param c The #cell from which to remove the particle.
+ * @param gp The #gpart to remove.
+ */
+void cell_remove_gpart(const struct engine *e, struct cell *c,
+                       struct gpart *gp) {
+
+  /* Quick cross-check */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  if (gp->type != swift_type_dark_matter)
+    error("Trying to remove a non-dark matter gpart.");
+
+  /* Mark the particle as inhibited */
+  gp->time_bin = time_bin_inhibited;
+}
+
+/**
+ * @brief "Remove" a star particle from the calculation.
+ *
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine running on this node.
+ * @param c The #cell from which to remove the particle.
+ * @param sp The #spart to remove.
+ */
+void cell_remove_spart(const struct engine *e, struct cell *c,
+                       struct spart *sp) {
+
+  /* Quick cross-check */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  /* Mark the particle as inhibited and stand-alone */
+  sp->time_bin = time_bin_inhibited;
+  if (sp->gpart) {
+    sp->gpart->time_bin = time_bin_inhibited;
+    sp->gpart->id_or_neg_offset = sp->id;
+    sp->gpart->type = swift_type_dark_matter;
+  }
+
+  /* Un-link the spart */
+  sp->gpart = NULL;
+}
+
+/**
+ * @brief "Remove" a gas particle from the calculation and convert its gpart
+ * friend to a dark matter particle.
+ *
+ * Note that the #part is not destroyed. The pointer is still valid
+ * after this call and the properties of the #part are not altered
+ * apart from the time-bin and #gpart pointer.
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine running on this node.
+ * @param c The #cell from which to remove the particle.
+ * @param p The #part to remove.
+ * @param xp The extended data of the particle to remove.
+ *
+ * @return Pointer to the #gpart the #part has become. It carries the
+ * ID of the #part and has a dark matter type.
+ */
+struct gpart *cell_convert_part_to_gpart(const struct engine *e, struct cell *c,
+                                         struct part *p, struct xpart *xp) {
+
+  /* Quick cross-checks */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  if (p->gpart == NULL)
+    error("Trying to convert part without gpart friend to dark matter!");
+
+  /* Get a handle */
+  struct gpart *gp = p->gpart;
+
+  /* Mark the particle as inhibited */
+  p->time_bin = time_bin_inhibited;
+
+  /* Un-link the part */
+  p->gpart = NULL;
+
+  /* Mark the gpart as dark matter */
+  gp->type = swift_type_dark_matter;
+  gp->id_or_neg_offset = p->id;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  gp->ti_kick = p->ti_kick;
+#endif
+
+  return gp;
+}
+
+/**
+ * @brief "Remove" a spart particle from the calculation and convert its gpart
+ * friend to a dark matter particle.
+ *
+ * Note that the #spart is not destroyed. The pointer is still valid
+ * after this call and the properties of the #spart are not altered
+ * apart from the time-bin and #gpart pointer.
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine running on this node.
+ * @param c The #cell from which to remove the particle.
+ * @param sp The #spart to remove.
+ *
+ * @return Pointer to the #gpart the #spart has become. It carries the
+ * ID of the #spart and has a dark matter type.
+ */
+struct gpart *cell_convert_spart_to_gpart(const struct engine *e,
+                                          struct cell *c, struct spart *sp) {
+
+  /* Quick cross-check */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  if (sp->gpart == NULL)
+    error("Trying to convert spart without gpart friend to dark matter!");
+
+  /* Get a handle */
+  struct gpart *gp = sp->gpart;
+
+  /* Mark the particle as inhibited */
+  sp->time_bin = time_bin_inhibited;
+
+  /* Un-link the spart */
+  sp->gpart = NULL;
+
+  /* Mark the gpart as dark matter */
+  gp->type = swift_type_dark_matter;
+  gp->id_or_neg_offset = sp->id;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  gp->ti_kick = sp->ti_kick;
+#endif
+
+  return gp;
+}
+
+/**
+ * @brief "Remove" a #part from a #cell and replace it with a #spart
+ * connected to the same #gpart.
+ *
+ * Note that the #part is not destroyed. The pointer is still valid
+ * after this call and the properties of the #part are not altered
+ * apart from the time-bin and #gpart pointer.
+ * The particle is inhibited and will officially be removed at the next rebuild.
+ *
+ * @param e The #engine.
+ * @param c The #cell from which to remove the #part.
+ * @param p The #part to remove (must be inside c).
+ * @param xp The extended data of the #part.
+ *
+ * @return A fresh #spart with the same ID, position, velocity and
+ * time-bin as the original #part.
+ */
+struct spart *cell_convert_part_to_spart(struct engine *e, struct cell *c,
+                                         struct part *p, struct xpart *xp) {
+
+  /* Quick cross-check */
+  if (c->nodeID != e->nodeID)
+    error("Can't remove a particle in a foreign cell.");
+
+  if (p->gpart == NULL)
+    error("Trying to convert part without gpart friend to star!");
+
+  /* Create a fresh (empty) spart */
+  struct spart *sp = cell_add_spart(e, c);
+
+  /* Did we run out of free spart slots? */
+  if (sp == NULL) return NULL;
+
+  /* Destroy the gas particle and get its gpart friend */
+  struct gpart *gp = cell_convert_part_to_gpart(e, c, p, xp);
+
+  /* Assign the ID back */
+  sp->id = gp->id_or_neg_offset;
+  gp->type = swift_type_stars;
+
+  /* Re-link things */
+  sp->gpart = gp;
+  gp->id_or_neg_offset = -(sp - e->s->sparts);
+
+  /* Synchronize clocks */
+  gp->time_bin = sp->time_bin;
+
+  /* Synchronize masses, positions and velocities */
+  sp->mass = gp->mass;
+  sp->x[0] = gp->x[0];
+  sp->x[1] = gp->x[1];
+  sp->x[2] = gp->x[2];
+  sp->v[0] = gp->v_full[0];
+  sp->v[1] = gp->v_full[1];
+  sp->v[2] = gp->v_full[2];
+
+#ifdef SWIFT_DEBUG_CHECKS
+  sp->ti_kick = gp->ti_kick;
+  gp->ti_drift = sp->ti_drift;
+#endif
+
+  /* Set a smoothing length */
+  sp->h = max(c->stars.h_max, c->hydro.h_max);
+
+  /* Here comes the Sun! */
+  return sp;
+}
+
+/**
+ * @brief Re-arrange the #part in a top-level cell such that all the extra ones
+ * for on-the-fly creation are located at the end of the array.
+ *
+ * @param c The #cell to sort.
+ * @param parts_offset The offset between the first #part in the array and the
+ * first #part in the global array in the space structure (for re-linking).
+ */
+void cell_reorder_extra_parts(struct cell *c, const ptrdiff_t parts_offset) {
+
+  struct part *parts = c->hydro.parts;
+  struct xpart *xparts = c->hydro.xparts;
+  const int count_real = c->hydro.count;
+
+  if (c->depth != 0 || c->nodeID != engine_rank)
+    error("This function should only be called on local top-level cells!");
+
+  int first_not_extra = count_real;
+
+  /* Find extra particles */
+  for (int i = 0; i < count_real; ++i) {
+    if (parts[i].time_bin == time_bin_not_created) {
+
+      /* Find the first non-extra particle after the end of the
+         real particles */
+      while (parts[first_not_extra].time_bin == time_bin_not_created) {
+        ++first_not_extra;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (first_not_extra >= count_real + space_extra_parts)
+        error("Looking for extra particles beyond this cell's range!");
+#endif
+
+      /* Swap everything, including g-part pointer */
+      memswap(&parts[i], &parts[first_not_extra], sizeof(struct part));
+      memswap(&xparts[i], &xparts[first_not_extra], sizeof(struct xpart));
+      if (parts[i].gpart)
+        parts[i].gpart->id_or_neg_offset = -(i + parts_offset);
+    }
+  }
+}
+
+/**
+ * @brief Re-arrange the #spart in a top-level cell such that all the extra ones
+ * for on-the-fly creation are located at the end of the array.
+ *
+ * @param c The #cell to sort.
+ * @param sparts_offset The offset between the first #spart in the array and the
+ * first #spart in the global array in the space structure (for re-linking).
+ */
+void cell_reorder_extra_sparts(struct cell *c, const ptrdiff_t sparts_offset) {
+
+  struct spart *sparts = c->stars.parts;
+  const int count_real = c->stars.count;
+
+  if (c->depth != 0 || c->nodeID != engine_rank)
+    error("This function should only be called on local top-level cells!");
+
+  int first_not_extra = count_real;
+
+  /* Find extra particles */
+  for (int i = 0; i < count_real; ++i) {
+    if (sparts[i].time_bin == time_bin_not_created) {
+
+      /* Find the first non-extra particle after the end of the
+         real particles */
+      while (sparts[first_not_extra].time_bin == time_bin_not_created) {
+        ++first_not_extra;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (first_not_extra >= count_real + space_extra_sparts)
+        error("Looking for extra particles beyond this cell's range!");
+#endif
+
+      /* Swap everything, including g-part pointer */
+      memswap(&sparts[i], &sparts[first_not_extra], sizeof(struct spart));
+      if (sparts[i].gpart)
+        sparts[i].gpart->id_or_neg_offset = -(i + sparts_offset);
+      sparts[first_not_extra].gpart = NULL;
+#ifdef SWIFT_DEBUG_CHECKS
+      if (sparts[first_not_extra].time_bin != time_bin_not_created)
+        error("Incorrect swap occured!");
+#endif
+    }
+  }
+}
+
+/**
+ * @brief Re-arrange the #gpart in a top-level cell such that all the extra ones
+ * for on-the-fly creation are located at the end of the array.
+ *
+ * @param c The #cell to sort.
+ * @param parts The global array of #part (for re-linking).
+ * @param sparts The global array of #spart (for re-linking).
+ */
+void cell_reorder_extra_gparts(struct cell *c, struct part *parts,
+                               struct spart *sparts) {
+
+  struct gpart *gparts = c->grav.parts;
+  const int count_real = c->grav.count;
+
+  if (c->depth != 0 || c->nodeID != engine_rank)
+    error("This function should only be called on local top-level cells!");
+
+  int first_not_extra = count_real;
+
+  /* Find extra particles */
+  for (int i = 0; i < count_real; ++i) {
+    if (gparts[i].time_bin == time_bin_not_created) {
+
+      /* Find the first non-extra particle after the end of the
+         real particles */
+      while (gparts[first_not_extra].time_bin == time_bin_not_created) {
+        ++first_not_extra;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (first_not_extra >= count_real + space_extra_gparts)
+        error("Looking for extra particles beyond this cell's range!");
+#endif
+
+      /* Swap everything (including pointers) */
+      memswap(&gparts[i], &gparts[first_not_extra], sizeof(struct gpart));
+      if (gparts[i].type == swift_type_gas) {
+        parts[-gparts[i].id_or_neg_offset].gpart = &gparts[i];
+      } else if (gparts[i].type == swift_type_stars) {
+        sparts[-gparts[i].id_or_neg_offset].gpart = &gparts[i];
+      }
+    }
+  }
+}
+
 /**
  * @brief Can we use the MM interactions fo a given pair of cells?
  *
@@ -2920,8 +4913,8 @@ int cell_can_use_pair_mm(const struct cell *ci, const struct cell *cj,
   const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
 
   /* Recover the multipole information */
-  const struct gravity_tensors *const multi_i = ci->multipole;
-  const struct gravity_tensors *const multi_j = cj->multipole;
+  const struct gravity_tensors *const multi_i = ci->grav.multipole;
+  const struct gravity_tensors *const multi_j = cj->grav.multipole;
 
   /* Get the distance between the CoMs */
   double dx = multi_i->CoM[0] - multi_j->CoM[0];
@@ -2938,3 +4931,67 @@ int cell_can_use_pair_mm(const struct cell *ci, const struct cell *cj,
 
   return gravity_M2L_accept(multi_i->r_max, multi_j->r_max, theta_crit2, r2);
 }
+
+/**
+ * @brief Can we use the MM interactions for a given pair of cells?
+ *
+ * This function uses the information gathered in the multipole at rebuild
+ * time and not the current position and radius of the multipole.
+ *
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ * @param e The #engine.
+ * @param s The #space.
+ */
+int cell_can_use_pair_mm_rebuild(const struct cell *ci, const struct cell *cj,
+                                 const struct engine *e,
+                                 const struct space *s) {
+
+  const double theta_crit2 = e->gravity_properties->theta_crit2;
+  const int periodic = s->periodic;
+  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
+
+  /* Recover the multipole information */
+  const struct gravity_tensors *const multi_i = ci->grav.multipole;
+  const struct gravity_tensors *const multi_j = cj->grav.multipole;
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  if (multi_i->CoM_rebuild[0] < ci->loc[0] ||
+      multi_i->CoM_rebuild[0] > ci->loc[0] + ci->width[0])
+    error("Invalid multipole position ci");
+  if (multi_i->CoM_rebuild[1] < ci->loc[1] ||
+      multi_i->CoM_rebuild[1] > ci->loc[1] + ci->width[1])
+    error("Invalid multipole position ci");
+  if (multi_i->CoM_rebuild[2] < ci->loc[2] ||
+      multi_i->CoM_rebuild[2] > ci->loc[2] + ci->width[2])
+    error("Invalid multipole position ci");
+
+  if (multi_j->CoM_rebuild[0] < cj->loc[0] ||
+      multi_j->CoM_rebuild[0] > cj->loc[0] + cj->width[0])
+    error("Invalid multipole position cj");
+  if (multi_j->CoM_rebuild[1] < cj->loc[1] ||
+      multi_j->CoM_rebuild[1] > cj->loc[1] + cj->width[1])
+    error("Invalid multipole position cj");
+  if (multi_j->CoM_rebuild[2] < cj->loc[2] ||
+      multi_j->CoM_rebuild[2] > cj->loc[2] + cj->width[2])
+    error("Invalid multipole position cj");
+
+#endif
+
+  /* Get the distance between the CoMs */
+  double dx = multi_i->CoM_rebuild[0] - multi_j->CoM_rebuild[0];
+  double dy = multi_i->CoM_rebuild[1] - multi_j->CoM_rebuild[1];
+  double dz = multi_i->CoM_rebuild[2] - multi_j->CoM_rebuild[2];
+
+  /* Apply BC */
+  if (periodic) {
+    dx = nearest(dx, dim[0]);
+    dy = nearest(dy, dim[1]);
+    dz = nearest(dz, dim[2]);
+  }
+  const double r2 = dx * dx + dy * dy + dz * dz;
+
+  return gravity_M2L_accept(multi_i->r_max_rebuild, multi_j->r_max_rebuild,
+                            theta_crit2, r2);
+}
diff --git a/src/cell.h b/src/cell.h
index 9438804e12d599f7e36bf937b9bdc2958793feac..baae12e4a5797e3b1e00eee144635e41b52c157e 100644
--- a/src/cell.h
+++ b/src/cell.h
@@ -77,44 +77,89 @@ struct link {
  */
 struct pcell {
 
-  /*! Maximal smoothing length. */
-  double h_max;
+  /*! Hydro variables */
+  struct {
 
-  /*! Minimal integer end-of-timestep in this cell for hydro tasks */
-  integertime_t ti_hydro_end_min;
+    /*! Maximal smoothing length. */
+    double h_max;
 
-  /*! Maximal integer end-of-timestep in this cell for hydro tasks */
-  integertime_t ti_hydro_end_max;
+    /*! Minimal integer end-of-timestep in this cell for hydro tasks */
+    integertime_t ti_end_min;
 
-  /*! Maximal integer beginning-of-timestep in this cell for hydro tasks */
-  integertime_t ti_hydro_beg_max;
+    /*! Maximal integer end-of-timestep in this cell for hydro tasks */
+    integertime_t ti_end_max;
 
-  /*! Minimal integer end-of-timestep in this cell for gravity tasks */
-  integertime_t ti_gravity_end_min;
+    /*! Maximal integer beginning-of-timestep in this cell for hydro tasks */
+    integertime_t ti_beg_max;
 
-  /*! Maximal integer end-of-timestep in this cell for gravity tasks */
-  integertime_t ti_gravity_end_max;
+    /*! Integer time of the last drift of the #part in this cell */
+    integertime_t ti_old_part;
 
-  /*! Maximal integer beginning-of-timestep in this cell for gravity tasks */
-  integertime_t ti_gravity_beg_max;
+    /*! Number of #part in this cell. */
+    int count;
 
-  /*! Integer time of the last drift of the #part in this cell */
-  integertime_t ti_old_part;
+  } hydro;
 
-  /*! Integer time of the last drift of the #gpart in this cell */
-  integertime_t ti_old_gpart;
+  /*! Gravity variables */
+  struct {
 
-  /*! Integer time of the last drift of the #multipole in this cell */
-  integertime_t ti_old_multipole;
+    /*! This cell's gravity-related tensors */
+    struct multipole m_pole;
 
-  /*! Number of #part in this cell. */
-  int count;
+    /*! Centre of mass. */
+    double CoM[3];
 
-  /*! Number of #gpart in this cell. */
-  int gcount;
+    /*! Centre of mass at rebuild time. */
+    double CoM_rebuild[3];
 
-  /*! Number of #spart in this cell. */
-  int scount;
+    /*! Upper limit of the CoM<->gpart distance. */
+    double r_max;
+
+    /*! Upper limit of the CoM<->gpart distance at last rebuild. */
+    double r_max_rebuild;
+
+    /*! Minimal integer end-of-timestep in this cell for gravity tasks */
+    integertime_t ti_end_min;
+
+    /*! Maximal integer end-of-timestep in this cell for gravity tasks */
+    integertime_t ti_end_max;
+
+    /*! Maximal integer beginning-of-timestep in this cell for gravity tasks */
+    integertime_t ti_beg_max;
+
+    /*! Integer time of the last drift of the #gpart in this cell */
+    integertime_t ti_old_part;
+
+    /*! Integer time of the last drift of the #multipole in this cell */
+    integertime_t ti_old_multipole;
+
+    /*! Number of #gpart in this cell. */
+    int count;
+
+  } grav;
+
+  /*! Stars variables */
+  struct {
+
+    /*! Number of #spart in this cell. */
+    int count;
+
+    /*! Maximal smoothing length. */
+    double h_max;
+
+    /*! Minimal integer end-of-timestep in this cell for stars tasks */
+    integertime_t ti_end_min;
+
+    /*! Maximal integer end-of-timestep in this cell for stars tasks */
+    integertime_t ti_end_max;
+
+    /*! Integer time of the last drift of the #spart in this cell */
+    integertime_t ti_old_part;
+
+  } stars;
+
+  /*! Maximal depth in that part of the tree */
+  int maxdepth;
 
   /*! Relative indices of the cell's progeny. */
   int progeny[8];
@@ -131,20 +176,44 @@ struct pcell {
  */
 struct pcell_step {
 
-  /*! Minimal integer end-of-timestep in this cell (hydro) */
-  integertime_t ti_hydro_end_min;
+  /*! Hydro variables */
+  struct {
+
+    /*! Minimal integer end-of-timestep in this cell (hydro) */
+    integertime_t ti_end_min;
+
+    /*! Minimal integer end-of-timestep in this cell (hydro) */
+    integertime_t ti_end_max;
+
+    /*! Maximal distance any #part has travelled since last rebuild */
+    float dx_max_part;
+
+  } hydro;
 
-  /*! Minimal integer end-of-timestep in this cell (hydro) */
-  integertime_t ti_hydro_end_max;
+  /*! Grav variables */
+  struct {
 
-  /*! Minimal integer end-of-timestep in this cell (gravity) */
-  integertime_t ti_gravity_end_min;
+    /*! Minimal integer end-of-timestep in this cell (gravity) */
+    integertime_t ti_end_min;
 
-  /*! Minimal integer end-of-timestep in this cell (gravity) */
-  integertime_t ti_gravity_end_max;
+    /*! Minimal integer end-of-timestep in this cell (gravity) */
+    integertime_t ti_end_max;
 
-  /*! Maximal distance any #part has travelled since last rebuild */
-  float dx_max_part;
+  } grav;
+
+  /*! Stars variables */
+  struct {
+
+    /*! Minimal integer end-of-timestep in this cell (stars) */
+    integertime_t ti_end_min;
+
+    /*! Maximal integer end-of-timestep in this cell (stars) */
+    integertime_t ti_end_max;
+
+    /*! Maximal distance any #part has travelled since last rebuild */
+    float dx_max_part;
+
+  } stars;
 };
 
 /**
@@ -160,264 +229,451 @@ struct cell {
   /*! The cell dimensions. */
   double width[3];
 
-  /*! Max smoothing length in this cell. */
-  double h_max;
-
-  /*! This cell's multipole. */
-  struct gravity_tensors *multipole;
+  /*! Pointers to the next level of cells. */
+  struct cell *progeny[8];
 
   /*! Linking pointer for "memory management". */
   struct cell *next;
 
-  /*! Pointer to the #part data. */
-  struct part *parts;
+  /*! Parent cell. */
+  struct cell *parent;
 
-  /*! Pointer to the #xpart data. */
-  struct xpart *xparts;
+  /*! Super cell, i.e. the highest-level parent cell with *any* task */
+  struct cell *super;
 
-  /*! Pointer to the #gpart data. */
-  struct gpart *gparts;
+  /*! Hydro variables */
+  struct {
 
-  /*! Pointer to the #spart data. */
-  struct spart *sparts;
+    /*! Pointer to the #part data. */
+    struct part *parts;
 
-  /*! Pointer for the sorted indices. */
-  struct entry *sort[13];
+    /*! Pointer to the #xpart data. */
+    struct xpart *xparts;
 
-  /*! Pointers to the next level of cells. */
-  struct cell *progeny[8];
+    /*! Pointer for the sorted indices. */
+    struct entry *sort[13];
 
-  /*! Parent cell. */
-  struct cell *parent;
+    /*! Super cell, i.e. the highest-level parent cell that has a hydro
+     * pair/self tasks */
+    struct cell *super;
 
-  /*! Super cell, i.e. the highest-level parent cell with *any* task */
-  struct cell *super;
+    /*! The task computing this cell's sorts. */
+    struct task *sorts;
 
-  /*! Super cell, i.e. the highest-level parent cell that has a hydro pair/self
-   * tasks */
-  struct cell *super_hydro;
+    /*! The drift task for parts */
+    struct task *drift;
 
-  /*! Super cell, i.e. the highest-level parent cell that has a grav pair/self
-   * tasks */
-  struct cell *super_gravity;
+    /*! Linked list of the tasks computing this cell's hydro density. */
+    struct link *density;
 
-  /*! Linked list of the tasks computing this cell's hydro density. */
-  struct link *density;
+    /* Linked list of the tasks computing this cell's hydro gradients. */
+    struct link *gradient;
 
-  /* Linked list of the tasks computing this cell's hydro gradients. */
-  struct link *gradient;
+    /*! Linked list of the tasks computing this cell's hydro forces. */
+    struct link *force;
 
-  /*! Linked list of the tasks computing this cell's hydro forces. */
-  struct link *force;
+    /*! Linked list of the tasks computing this cell's limiter. */
+    struct link *limiter;
 
-  /*! Linked list of the tasks computing this cell's gravity forces. */
-  struct link *grav;
+    /*! Dependency implicit task for the ghost  (in->ghost->out)*/
+    struct task *ghost_in;
 
-  /*! The task computing this cell's sorts. */
-  struct task *sorts;
+    /*! Dependency implicit task for the ghost  (in->ghost->out)*/
+    struct task *ghost_out;
 
-  /*! The multipole initialistation task */
-  struct task *init_grav;
+    /*! The ghost task itself */
+    struct task *ghost;
 
-  /*! Implicit task for the gravity initialisation */
-  struct task *init_grav_out;
+    /*! The extra ghost task for complex hydro schemes */
+    struct task *extra_ghost;
 
-  /*! Dependency implicit task for the ghost  (in->ghost->out)*/
-  struct task *ghost_in;
+    /*! The task to end the force calculation */
+    struct task *end_force;
 
-  /*! Dependency implicit task for the ghost  (in->ghost->out)*/
-  struct task *ghost_out;
+    /*! Task for cooling */
+    struct task *cooling;
 
-  /*! The ghost task itself */
-  struct task *ghost;
+    /*! Task for star formation */
+    struct task *star_formation;
 
-  /*! The extra ghost task for complex hydro schemes */
-  struct task *extra_ghost;
+    /*! Max smoothing length in this cell. */
+    double h_max;
 
-  /*! The drift task for parts */
-  struct task *drift_part;
+    /*! Last (integer) time the cell's part were drifted forward in time. */
+    integertime_t ti_old_part;
 
-  /*! The drift task for gparts */
-  struct task *drift_gpart;
+    /*! Minimum end of (integer) time step in this cell for hydro tasks. */
+    integertime_t ti_end_min;
 
-  /*! The first kick task */
-  struct task *kick1;
+    /*! Maximum end of (integer) time step in this cell for hydro tasks. */
+    integertime_t ti_end_max;
 
-  /*! The second kick task */
-  struct task *kick2;
+    /*! Maximum beginning of (integer) time step in this cell for hydro tasks.
+     */
+    integertime_t ti_beg_max;
 
-  /*! The task to end the force calculation */
-  struct task *end_force;
+    /*! Spin lock for various uses (#part case). */
+    swift_lock_type lock;
 
-  /*! The task to compute time-steps */
-  struct task *timestep;
+    /*! Maximum part movement in this cell since last construction. */
+    float dx_max_part;
 
-  /*! Task computing long range non-periodic gravity interactions */
-  struct task *grav_long_range;
+    /*! Maximum particle movement in this cell since the last sort. */
+    float dx_max_sort;
 
-  /*! Implicit task for the down propagation */
-  struct task *grav_down_in;
+    /*! Values of h_max before the drifts, used for sub-cell tasks. */
+    float h_max_old;
 
-  /*! Task propagating the mesh forces to the particles */
-  struct task *grav_mesh;
+    /*! Values of dx_max before the drifts, used for sub-cell tasks. */
+    float dx_max_part_old;
 
-  /*! Task propagating the multipole to the particles */
-  struct task *grav_down;
+    /*! Values of dx_max_sort before the drifts, used for sub-cell tasks. */
+    float dx_max_sort_old;
 
-  /*! Task for cooling */
-  struct task *cooling;
+    /*! Nr of #part in this cell. */
+    int count;
 
-  /*! Task for source terms */
-  struct task *sourceterms;
+    /*! Nr of #part this cell can hold after addition of new #part. */
+    int count_total;
 
-#ifdef WITH_MPI
+    /*! Number of #part updated in this cell. */
+    int updated;
 
-  /* Task receiving hydro data (positions). */
-  struct task *recv_xv;
+    /*! Number of #part inhibited in this cell. */
+    int inhibited;
 
-  /* Task receiving hydro data (density). */
-  struct task *recv_rho;
+    /*! Is the #part data of this cell being used in a sub-cell? */
+    int hold;
 
-  /* Task receiving hydro data (gradient). */
-  struct task *recv_gradient;
+    /*! Bit mask of sort directions that will be needed in the next timestep. */
+    unsigned int requires_sorts;
 
-  /* Task receiving gpart data. */
-  struct task *recv_grav;
+    /*! Bit mask of sorts that need to be computed for this cell. */
+    unsigned int do_sort;
 
-  /* Task receiving data (time-step). */
-  struct task *recv_ti;
+    /*! Bit-mask indicating the sorted directions */
+    unsigned int sorted;
 
-  /* Linked list for sending hydro data (positions). */
-  struct link *send_xv;
+    /*! Does this cell need to be drifted (hydro)? */
+    char do_drift;
 
-  /* Linked list for sending hydro data (density). */
-  struct link *send_rho;
+    /*! Do any of this cell's sub-cells need to be drifted (hydro)? */
+    char do_sub_drift;
 
-  /* Linked list for sending hydro data (gradient). */
-  struct link *send_gradient;
+    /*! Do any of this cell's sub-cells need to be sorted? */
+    char do_sub_sort;
 
-  /* Linked list for sending gpart data. */
-  struct link *send_grav;
+    /*! Does this cell need to be limited? */
+    char do_limiter;
 
-  /* Linked list for sending data (time-step). */
-  struct link *send_ti;
+    /*! Do any of this cell's sub-cells need to be limited? */
+    char do_sub_limiter;
 
-  /*! Bit mask of the proxies this cell is registered with. */
-  unsigned long long int sendto;
+#ifdef SWIFT_DEBUG_CHECKS
 
-  /*! Pointer to this cell's packed representation. */
-  struct pcell *pcell;
+    /*! Last (integer) time the cell's sort arrays were updated. */
+    integertime_t ti_sort;
 
-  /*! Size of the packed representation */
-  int pcell_size;
+#endif
 
-  /*! MPI tag associated with this cell */
-  int tag;
+  } hydro;
 
-#endif
+  /*! Grav variables */
+  struct {
 
-  /*! Minimum end of (integer) time step in this cell for hydro tasks. */
-  integertime_t ti_hydro_end_min;
+    /*! Pointer to the #gpart data. */
+    struct gpart *parts;
 
-  /*! Maximum end of (integer) time step in this cell for hydro tasks. */
-  integertime_t ti_hydro_end_max;
+    /*! This cell's multipole. */
+    struct gravity_tensors *multipole;
 
-  /*! Maximum beginning of (integer) time step in this cell for hydro tasks. */
-  integertime_t ti_hydro_beg_max;
+    /*! Super cell, i.e. the highest-level parent cell that has a grav pair/self
+     * tasks */
+    struct cell *super;
 
-  /*! Minimum end of (integer) time step in this cell for gravity tasks. */
-  integertime_t ti_gravity_end_min;
+    /*! The drift task for gparts */
+    struct task *drift;
 
-  /*! Maximum end of (integer) time step in this cell for gravity tasks. */
-  integertime_t ti_gravity_end_max;
+    /*! Implicit task (going up- and down the tree) for the #gpart drifts */
+    struct task *drift_out;
 
-  /*! Maximum beginning of (integer) time step in this cell for gravity tasks.
-   */
-  integertime_t ti_gravity_beg_max;
+    /*! Linked list of the tasks computing this cell's gravity forces. */
+    struct link *grav;
 
-  /*! Last (integer) time the cell's part were drifted forward in time. */
-  integertime_t ti_old_part;
+    /*! Linked list of the tasks computing this cell's gravity M-M forces. */
+    struct link *mm;
 
-  /*! Last (integer) time the cell's gpart were drifted forward in time. */
-  integertime_t ti_old_gpart;
+    /*! The multipole initialisation task */
+    struct task *init;
 
-  /*! Last (integer) time the cell's multipole was drifted forward in time. */
-  integertime_t ti_old_multipole;
+    /*! Implicit task for the gravity initialisation */
+    struct task *init_out;
 
-  /*! Minimum dimension, i.e. smallest edge of this cell (min(width)). */
-  float dmin;
+    /*! Task computing long range non-periodic gravity interactions */
+    struct task *long_range;
 
-  /*! Maximum particle movement in this cell since the last sort. */
-  float dx_max_sort;
+    /*! Implicit task for the down propagation */
+    struct task *down_in;
 
-  /*! Maximum part movement in this cell since last construction. */
-  float dx_max_part;
+    /*! Task propagating the mesh forces to the particles */
+    struct task *mesh;
 
-  /*! Nr of #part in this cell. */
-  int count;
+    /*! Task propagating the multipole to the particles */
+    struct task *down;
 
-  /*! Nr of #gpart in this cell. */
-  int gcount;
+    /*! The task to end the force calculation */
+    struct task *end_force;
 
-  /*! Nr of #spart in this cell. */
-  int scount;
+    /*! Minimum end of (integer) time step in this cell for gravity tasks. */
+    integertime_t ti_end_min;
 
-  /*! Bit-mask indicating the sorted directions */
-  unsigned int sorted;
+    /*! Maximum end of (integer) time step in this cell for gravity tasks. */
+    integertime_t ti_end_max;
 
-  /*! Spin lock for various uses (#part case). */
-  swift_lock_type lock;
+    /*! Maximum beginning of (integer) time step in this cell for gravity tasks.
+     */
+    integertime_t ti_beg_max;
 
-  /*! Spin lock for various uses (#gpart case). */
-  swift_lock_type glock;
+    /*! Last (integer) time the cell's gpart were drifted forward in time. */
+    integertime_t ti_old_part;
 
-  /*! Spin lock for various uses (#multipole case). */
-  swift_lock_type mlock;
+    /*! Last (integer) time the cell's multipole was drifted forward in time. */
+    integertime_t ti_old_multipole;
 
-  /*! Spin lock for various uses (#spart case). */
-  swift_lock_type slock;
+    /*! Spin lock for various uses (#gpart case). */
+    swift_lock_type plock;
 
-  /*! ID of the previous owner, e.g. runner. */
-  int owner;
+    /*! Spin lock for various uses (#multipole case). */
+    swift_lock_type mlock;
 
-  /*! Number of #part updated in this cell. */
-  int updated;
+    /*! Nr of #gpart in this cell. */
+    int count;
 
-  /*! Number of #gpart updated in this cell. */
-  int g_updated;
+    /*! Nr of #gpart this cell can hold after addition of new #gpart. */
+    int count_total;
 
-  /*! Number of #spart updated in this cell. */
-  int s_updated;
+    /*! Number of #gpart updated in this cell. */
+    int updated;
 
-  /*! ID of the node this cell lives on. */
-  int nodeID;
+    /*! Number of #gpart inhibited in this cell. */
+    int inhibited;
+
+    /*! Is the #gpart data of this cell being used in a sub-cell? */
+    int phold;
+
+    /*! Is the #multipole data of this cell being used in a sub-cell? */
+    int mhold;
+
+    /*! Number of M-M tasks that are associated with this cell. */
+    short int nr_mm_tasks;
+
+    /*! Does this cell need to be drifted (gravity)? */
+    char do_drift;
+
+    /*! Do any of this cell's sub-cells need to be drifted (gravity)? */
+    char do_sub_drift;
+
+  } grav;
+
+  /*! Stars variables */
+  struct {
+
+    /*! Pointer to the #spart data. */
+    struct spart *parts;
+
+    /*! The star ghost task itself */
+    struct task *ghost;
+
+    /*! Linked list of the tasks computing this cell's star density. */
+    struct link *density;
+
+    /*! Linked list of the tasks computing this cell's star feedback. */
+    struct link *feedback;
+
+    /*! The task computing this cell's sorts before the density. */
+    struct task *sorts;
+
+    /*! The drift task for sparts */
+    struct task *drift;
+
+    /*! Implicit tasks marking the entry of the stellar physics block of tasks
+     */
+    struct task *stars_in;
+
+    /*! Implicit tasks marking the exit of the stellar physics block of tasks */
+    struct task *stars_out;
+
+    /*! Max smoothing length in this cell. */
+    double h_max;
+
+    /*! Last (integer) time the cell's spart were drifted forward in time. */
+    integertime_t ti_old_part;
+
+    /*! Spin lock for various uses (#spart case). */
+    swift_lock_type lock;
+
+    /*! Nr of #spart in this cell. */
+    int count;
+
+    /*! Nr of #spart this cell can hold after addition of new #spart. */
+    int count_total;
+
+    /*! Values of h_max before the drifts, used for sub-cell tasks. */
+    float h_max_old;
+
+    /*! Maximum part movement in this cell since last construction. */
+    float dx_max_part;
+
+    /*! Values of dx_max before the drifts, used for sub-cell tasks. */
+    float dx_max_part_old;
+
+    /*! Maximum particle movement in this cell since the last sort. */
+    float dx_max_sort;
+
+    /*! Values of dx_max_sort before the drifts, used for sub-cell tasks. */
+    float dx_max_sort_old;
+
+    /*! Bit mask of sort directions that will be needed in the next timestep. */
+    unsigned int requires_sorts;
+
+    /*! Pointer for the sorted indices. */
+    struct entry *sort[13];
+
+    /*! Bit-mask indicating the sorted directions */
+    unsigned int sorted;
+
+    /*! Bit mask of sorts that need to be computed for this cell. */
+    unsigned int do_sort;
+
+    /*! Do any of this cell's sub-cells need to be sorted? */
+    char do_sub_sort;
+
+    /*! Minimum end of (integer) time step in this cell for star tasks. */
+    integertime_t ti_end_min;
+
+    /*! Maximum end of (integer) time step in this cell for star tasks. */
+    integertime_t ti_end_max;
+
+    /*! Maximum beginning of (integer) time step in this cell for star tasks.
+     */
+    integertime_t ti_beg_max;
+
+    /*! Number of #spart updated in this cell. */
+    int updated;
+
+    /*! Number of #spart inhibited in this cell. */
+    int inhibited;
+
+    /*! Is the #spart data of this cell being used in a sub-cell? */
+    int hold;
+
+    /*! Does this cell need to be drifted (stars)? */
+    char do_drift;
+
+    /*! Do any of this cell's sub-cells need to be drifted (stars)? */
+    char do_sub_drift;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /*! Last (integer) time the cell's sort arrays were updated. */
+    integertime_t ti_sort;
+#endif
+
+  } stars;
+
+#ifdef WITH_MPI
+  /*! MPI variables */
+  struct {
+
+    struct {
+      /* Task receiving hydro data (positions). */
+      struct task *recv_xv;
+
+      /* Task receiving hydro data (density). */
+      struct task *recv_rho;
+
+      /* Task receiving hydro data (gradient). */
+      struct task *recv_gradient;
 
-  /*! Is the #part data of this cell being used in a sub-cell? */
-  int hold;
+      /* Linked list for sending hydro data (positions). */
+      struct link *send_xv;
 
-  /*! Is the #gpart data of this cell being used in a sub-cell? */
-  int ghold;
+      /* Linked list for sending hydro data (density). */
+      struct link *send_rho;
 
-  /*! Is the #multipole data of this cell being used in a sub-cell? */
-  int mhold;
+      /* Linked list for sending hydro data (gradient). */
+      struct link *send_gradient;
 
-  /*! Is the #spart data of this cell being used in a sub-cell? */
-  int shold;
+    } hydro;
 
-  /*! Values of dx_max before the drifts, used for sub-cell tasks. */
-  float dx_max_old;
+    struct {
 
-  /*! Values of h_max before the drifts, used for sub-cell tasks. */
-  float h_max_old;
+      /* Task receiving gpart data. */
+      struct task *recv;
 
-  /*! Values of dx_max_sort before the drifts, used for sub-cell tasks. */
-  float dx_max_sort_old;
+      /* Linked list for sending gpart data. */
+      struct link *send;
+    } grav;
 
-  /*! Bit mask of sort directions that will be needed in the next timestep. */
-  unsigned int requires_sorts;
+    struct {
+      /* Task receiving spart data. */
+      struct task *recv;
 
-  /*! Bit mask of sorts that need to be computed for this cell. */
-  unsigned int do_sort;
+      /* Linked list for sending spart data. */
+      struct link *send;
+    } stars;
+
+    struct {
+      /* Task receiving limiter data. */
+      struct task *recv;
+
+      /* Linked list for sending limiter data. */
+      struct link *send;
+    } limiter;
+
+    /* Task receiving data (time-step). */
+    struct task *recv_ti;
+
+    /* Linked list for sending data (time-step). */
+    struct link *send_ti;
+
+    /*! Bit mask of the proxies this cell is registered with. */
+    unsigned long long int sendto;
+
+    /*! Pointer to this cell's packed representation. */
+    struct pcell *pcell;
+
+    /*! Size of the packed representation */
+    int pcell_size;
+
+    /*! MPI tag associated with this cell */
+    int tag;
+
+  } mpi;
+#endif
+
+  /*! The first kick task */
+  struct task *kick1;
+
+  /*! The second kick task */
+  struct task *kick2;
+
+  /*! The task to compute time-steps */
+  struct task *timestep;
+
+  /*! The task to limit the time-step of inactive particles */
+  struct task *timestep_limiter;
+
+  /*! The logger task */
+  struct task *logger;
+
+  /*! Minimum dimension, i.e. smallest edge of this cell (min(width)). */
+  float dmin;
+
+  /*! ID of the previous owner, e.g. runner. */
+  int owner;
+
+  /*! ID of the node this cell lives on. */
+  int nodeID;
 
   /*! Number of tasks that are associated with this cell. */
   short int nr_tasks;
@@ -431,28 +687,10 @@ struct cell {
   /*! The maximal depth of this cell and its progenies */
   char maxdepth;
 
-  /*! Does this cell need to be drifted (hydro)? */
-  char do_drift;
-
-  /*! Do any of this cell's sub-cells need to be drifted (hydro)? */
-  char do_sub_drift;
-
-  /*! Does this cell need to be drifted (gravity)? */
-  char do_grav_drift;
-
-  /*! Do any of this cell's sub-cells need to be drifted (gravity)? */
-  char do_grav_sub_drift;
-
-  /*! Do any of this cell's sub-cells need to be sorted? */
-  char do_sub_sort;
-
 #ifdef SWIFT_DEBUG_CHECKS
   /* Cell ID (for debugging) */
   int cellID;
 
-  /*! Last (integer) time the cell's sort arrays were updated. */
-  integertime_t ti_sort;
-
   /*! The list of tasks that have been executed on this cell */
   char tasks_executed[64];
 
@@ -479,8 +717,9 @@ int cell_mlocktree(struct cell *c);
 void cell_munlocktree(struct cell *c);
 int cell_slocktree(struct cell *c);
 void cell_sunlocktree(struct cell *c);
-int cell_pack(struct cell *c, struct pcell *pc);
-int cell_unpack(struct pcell *pc, struct cell *c, struct space *s);
+int cell_pack(struct cell *c, struct pcell *pc, const int with_gravity);
+int cell_unpack(struct pcell *pc, struct cell *c, struct space *s,
+                const int with_gravity);
 int cell_pack_tags(const struct cell *c, int *tags);
 int cell_unpack_tags(const int *tags, struct cell *c);
 int cell_pack_end_step(struct cell *c, struct pcell_step *pcell);
@@ -491,19 +730,26 @@ int cell_getsize(struct cell *c);
 int cell_link_parts(struct cell *c, struct part *parts);
 int cell_link_gparts(struct cell *c, struct gpart *gparts);
 int cell_link_sparts(struct cell *c, struct spart *sparts);
+int cell_link_foreign_parts(struct cell *c, struct part *parts);
+int cell_link_foreign_gparts(struct cell *c, struct gpart *gparts);
+int cell_count_parts_for_tasks(const struct cell *c);
+int cell_count_gparts_for_tasks(const struct cell *c);
 void cell_clean_links(struct cell *c, void *data);
 void cell_make_multipoles(struct cell *c, integertime_t ti_current);
-void cell_check_multipole(struct cell *c, void *data);
+void cell_check_multipole(struct cell *c);
+void cell_check_foreign_multipole(const struct cell *c);
 void cell_clean(struct cell *c);
 void cell_check_part_drift_point(struct cell *c, void *data);
 void cell_check_gpart_drift_point(struct cell *c, void *data);
+void cell_check_spart_drift_point(struct cell *c, void *data);
 void cell_check_multipole_drift_point(struct cell *c, void *data);
 void cell_reset_task_counters(struct cell *c);
 int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s);
+int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s);
 int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s);
-void cell_set_super(struct cell *c, struct cell *super);
 void cell_drift_part(struct cell *c, const struct engine *e, int force);
 void cell_drift_gpart(struct cell *c, const struct engine *e, int force);
+void cell_drift_spart(struct cell *c, const struct engine *e, int force);
 void cell_drift_multipole(struct cell *c, const struct engine *e);
 void cell_drift_all_multipoles(struct cell *c, const struct engine *e);
 void cell_check_timesteps(struct cell *c);
@@ -512,16 +758,106 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
                                        struct scheduler *s);
 void cell_activate_subcell_grav_tasks(struct cell *ci, struct cell *cj,
                                       struct scheduler *s);
+void cell_activate_subcell_stars_tasks(struct cell *ci, struct cell *cj,
+                                       struct scheduler *s);
 void cell_activate_subcell_external_grav_tasks(struct cell *ci,
                                                struct scheduler *s);
 void cell_activate_drift_part(struct cell *c, struct scheduler *s);
 void cell_activate_drift_gpart(struct cell *c, struct scheduler *s);
-void cell_activate_sorts(struct cell *c, int sid, struct scheduler *s);
+void cell_activate_drift_spart(struct cell *c, struct scheduler *s);
+void cell_activate_hydro_sorts(struct cell *c, int sid, struct scheduler *s);
+void cell_activate_stars_sorts(struct cell *c, int sid, struct scheduler *s);
+void cell_activate_limiter(struct cell *c, struct scheduler *s);
 void cell_clear_drift_flags(struct cell *c, void *data);
+void cell_clear_limiter_flags(struct cell *c, void *data);
 void cell_set_super_mapper(void *map_data, int num_elements, void *extra_data);
+void cell_check_spart_pos(const struct cell *c,
+                          const struct spart *global_sparts);
+void cell_clear_stars_sort_flags(struct cell *c, const int is_super);
 int cell_has_tasks(struct cell *c);
+void cell_remove_part(const struct engine *e, struct cell *c, struct part *p,
+                      struct xpart *xp);
+void cell_remove_gpart(const struct engine *e, struct cell *c,
+                       struct gpart *gp);
+void cell_remove_spart(const struct engine *e, struct cell *c,
+                       struct spart *sp);
+struct spart *cell_add_spart(struct engine *e, struct cell *c);
+struct gpart *cell_convert_part_to_gpart(const struct engine *e, struct cell *c,
+                                         struct part *p, struct xpart *xp);
+struct gpart *cell_convert_spart_to_gpart(const struct engine *e,
+                                          struct cell *c, struct spart *sp);
+struct spart *cell_convert_part_to_spart(struct engine *e, struct cell *c,
+                                         struct part *p, struct xpart *xp);
+void cell_reorder_extra_parts(struct cell *c, const ptrdiff_t parts_offset);
+void cell_reorder_extra_gparts(struct cell *c, struct part *parts,
+                               struct spart *sparts);
+void cell_reorder_extra_sparts(struct cell *c, const ptrdiff_t sparts_offset);
 int cell_can_use_pair_mm(const struct cell *ci, const struct cell *cj,
                          const struct engine *e, const struct space *s);
+int cell_can_use_pair_mm_rebuild(const struct cell *ci, const struct cell *cj,
+                                 const struct engine *e, const struct space *s);
+
+/**
+ * @brief Compute the square of the minimal distance between any two points in
+ * two cells of the same size
+ *
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ * @param periodic Are we using periodic BCs?
+ * @param dim The dimensions of the simulation volume
+ */
+__attribute__((always_inline)) INLINE static double cell_min_dist2_same_size(
+    const struct cell *restrict ci, const struct cell *restrict cj,
+    const int periodic, const double dim[3]) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ci->width[0] != cj->width[0]) error("Cells of different size!");
+  if (ci->width[1] != cj->width[1]) error("Cells of different size!");
+  if (ci->width[2] != cj->width[2]) error("Cells of different size!");
+#endif
+
+  const double cix_min = ci->loc[0];
+  const double ciy_min = ci->loc[1];
+  const double ciz_min = ci->loc[2];
+  const double cjx_min = cj->loc[0];
+  const double cjy_min = cj->loc[1];
+  const double cjz_min = cj->loc[2];
+
+  const double cix_max = ci->loc[0] + ci->width[0];
+  const double ciy_max = ci->loc[1] + ci->width[1];
+  const double ciz_max = ci->loc[2] + ci->width[2];
+  const double cjx_max = cj->loc[0] + cj->width[0];
+  const double cjy_max = cj->loc[1] + cj->width[1];
+  const double cjz_max = cj->loc[2] + cj->width[2];
+
+  if (periodic) {
+
+    const double dx = min4(fabs(nearest(cix_min - cjx_min, dim[0])),
+                           fabs(nearest(cix_min - cjx_max, dim[0])),
+                           fabs(nearest(cix_max - cjx_min, dim[0])),
+                           fabs(nearest(cix_max - cjx_max, dim[0])));
+
+    const double dy = min4(fabs(nearest(ciy_min - cjy_min, dim[1])),
+                           fabs(nearest(ciy_min - cjy_max, dim[1])),
+                           fabs(nearest(ciy_max - cjy_min, dim[1])),
+                           fabs(nearest(ciy_max - cjy_max, dim[1])));
+
+    const double dz = min4(fabs(nearest(ciz_min - cjz_min, dim[2])),
+                           fabs(nearest(ciz_min - cjz_max, dim[2])),
+                           fabs(nearest(ciz_max - cjz_min, dim[2])),
+                           fabs(nearest(ciz_max - cjz_max, dim[2])));
+
+    return dx * dx + dy * dy + dz * dz;
+
+  } else {
+
+    const double dx = min(fabs(cix_max - cjx_min), fabs(cix_min - cjx_max));
+    const double dy = min(fabs(ciy_max - cjy_min), fabs(ciy_min - cjy_max));
+    const double dz = min(fabs(ciz_max - cjz_min), fabs(ciz_min - cjz_max));
+
+    return dx * dx + dy * dy + dz * dz;
+  }
+}
 
 /* Inlined functions (for speed). */
 
@@ -538,8 +874,8 @@ cell_can_recurse_in_pair_hydro_task(const struct cell *c) {
   /* If so, is the cut-off radius plus the max distance the parts have moved */
   /* smaller than the sub-cell sizes ? */
   /* Note: We use the _old values as these might have been updated by a drift */
-  return c->split &&
-         ((kernel_gamma * c->h_max_old + c->dx_max_old) < 0.5f * c->dmin);
+  return c->split && ((kernel_gamma * c->hydro.h_max_old +
+                       c->hydro.dx_max_part_old) < 0.5f * c->dmin);
 }
 
 /**
@@ -552,7 +888,43 @@ __attribute__((always_inline)) INLINE static int
 cell_can_recurse_in_self_hydro_task(const struct cell *c) {
 
   /* Is the cell split and not smaller than the smoothing length? */
-  return c->split && (kernel_gamma * c->h_max_old < 0.5f * c->dmin);
+  return c->split && (kernel_gamma * c->hydro.h_max_old < 0.5f * c->dmin);
+}
+
+/**
+ * @brief Can a sub-pair star task recurse to a lower level based
+ * on the status of the particles in the cell.
+ *
+ * @param ci The #cell with stars.
+ * @param cj The #cell with hydro parts.
+ */
+__attribute__((always_inline)) INLINE static int
+cell_can_recurse_in_pair_stars_task(const struct cell *ci,
+                                    const struct cell *cj) {
+
+  /* Is the cell split ? */
+  /* If so, is the cut-off radius plus the max distance the parts have moved */
+  /* smaller than the sub-cell sizes ? */
+  /* Note: We use the _old values as these might have been updated by a drift */
+  return ci->split && cj->split &&
+         ((kernel_gamma * ci->stars.h_max_old + ci->stars.dx_max_part_old) <
+          0.5f * ci->dmin) &&
+         ((kernel_gamma * cj->hydro.h_max_old + cj->hydro.dx_max_part_old) <
+          0.5f * cj->dmin);
+}
+
+/**
+ * @brief Can a sub-self stars task recurse to a lower level based
+ * on the status of the particles in the cell.
+ *
+ * @param c The #cell.
+ */
+__attribute__((always_inline)) INLINE static int
+cell_can_recurse_in_self_stars_task(const struct cell *c) {
+
+  /* Is the cell split and not smaller than the smoothing length? */
+  return c->split && (kernel_gamma * c->stars.h_max_old < 0.5f * c->dmin) &&
+         (kernel_gamma * c->hydro.h_max_old < 0.5f * c->dmin);
 }
 
 /**
@@ -569,7 +941,9 @@ __attribute__((always_inline)) INLINE static int cell_can_split_pair_hydro_task(
   /* the sub-cell sizes ? */
   /* Note that since tasks are create after a rebuild no need to take */
   /* into account any part motion (i.e. dx_max == 0 here) */
-  return c->split && (space_stretch * kernel_gamma * c->h_max < 0.5f * c->dmin);
+  return c->split &&
+         (space_stretch * kernel_gamma * c->hydro.h_max < 0.5f * c->dmin) &&
+         (space_stretch * kernel_gamma * c->stars.h_max < 0.5f * c->dmin);
 }
 
 /**
@@ -586,7 +960,9 @@ __attribute__((always_inline)) INLINE static int cell_can_split_self_hydro_task(
   /* the sub-cell sizes ? */
   /* Note: No need for more checks here as all the sub-pairs and sub-self */
   /* tasks will be created. So no need to check for h_max */
-  return c->split && (space_stretch * kernel_gamma * c->h_max < 0.5f * c->dmin);
+  return c->split &&
+         (space_stretch * kernel_gamma * c->hydro.h_max < 0.5f * c->dmin) &&
+         (space_stretch * kernel_gamma * c->stars.h_max < 0.5f * c->dmin);
 }
 
 /**
@@ -598,8 +974,8 @@ __attribute__((always_inline)) INLINE static int cell_can_split_self_hydro_task(
 __attribute__((always_inline)) INLINE static int
 cell_can_split_pair_gravity_task(const struct cell *c) {
 
-  /* Is the cell split ? */
-  return c->split && c->depth < space_subdepth_grav;
+  /* Is the cell split and still far from the leaves ? */
+  return c->split && ((c->maxdepth - c->depth) > space_subdepth_diff_grav);
 }
 
 /**
@@ -611,40 +987,70 @@ cell_can_split_pair_gravity_task(const struct cell *c) {
 __attribute__((always_inline)) INLINE static int
 cell_can_split_self_gravity_task(const struct cell *c) {
 
-  /* Is the cell split ? */
-  return c->split && c->depth < space_subdepth_grav;
+  /* Is the cell split and still far from the leaves ? */
+  return c->split && ((c->maxdepth - c->depth) > space_subdepth_diff_grav);
 }
 
 /**
- * @brief Have particles in a pair of cells moved too much and require a rebuild
+ * @brief Have gas particles in a pair of cells moved too much and require a
+ * rebuild
  * ?
  *
  * @param ci The first #cell.
  * @param cj The second #cell.
  */
-__attribute__((always_inline)) INLINE static int cell_need_rebuild_for_pair(
-    const struct cell *ci, const struct cell *cj) {
+__attribute__((always_inline)) INLINE static int
+cell_need_rebuild_for_hydro_pair(const struct cell *ci, const struct cell *cj) {
 
   /* Is the cut-off radius plus the max distance the parts in both cells have */
   /* moved larger than the cell size ? */
   /* Note ci->dmin == cj->dmin */
-  return (kernel_gamma * max(ci->h_max, cj->h_max) + ci->dx_max_part +
-              cj->dx_max_part >
-          cj->dmin);
+  if (kernel_gamma * max(ci->hydro.h_max, cj->hydro.h_max) +
+          ci->hydro.dx_max_part + cj->hydro.dx_max_part >
+      cj->dmin) {
+    return 1;
+  }
+  return 0;
+}
+/**
+ * @brief Have star particles in a pair of cells moved too much and require a
+ * rebuild?
+ *
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ */
+__attribute__((always_inline)) INLINE static int
+cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
+
+  /* Is the cut-off radius plus the max distance the stars in ci and the */
+  /* gas parts in cj have moved larger than the cell size ? */
+  /* Note ci->dmin == cj->dmin */
+  if (kernel_gamma * max(ci->stars.h_max, cj->hydro.h_max) +
+          ci->stars.dx_max_part + cj->hydro.dx_max_part >
+      cj->dmin) {
+    return 1;
+  }
+  return 0;
 }
 
 /**
- * @brief Add a unique tag to a cell.
+ * @brief Add a unique tag to a cell, mostly for MPI communications.
+ *
+ * This function locks the cell so that tags can be added concurrently.
+ *
+ * @param c The #cell to tag.
  */
-__attribute((always_inline)) INLINE static void cell_tag(struct cell *c) {
+__attribute__((always_inline)) INLINE static void cell_ensure_tagged(
+    struct cell *c) {
 #ifdef WITH_MPI
 
-#ifdef SWIFT_DEBUG_CHECKS
-  if (c->tag > 0) error("setting tag for already tagged cell");
-#endif
-
-  if (c->tag < 0 && (c->tag = atomic_inc(&cell_next_tag)) > cell_max_tag)
+  lock_lock(&c->hydro.lock);
+  if (c->mpi.tag < 0 &&
+      (c->mpi.tag = atomic_inc(&cell_next_tag)) > cell_max_tag)
     error("Ran out of cell tags.");
+  if (lock_unlock(&c->hydro.lock) != 0) {
+    error("Failed to unlock cell.");
+  }
 #else
   error("SWIFT was not compiled with MPI enabled.");
 #endif  // WITH_MPI
diff --git a/src/chemistry/EAGLE/chemistry.h b/src/chemistry/EAGLE/chemistry.h
index 7f8a672669e1c5b1f8997ecf5971c63efee7522f..7cb61d11fc5578da4cf545448c7fdc2e6b0b12ed 100644
--- a/src/chemistry/EAGLE/chemistry.h
+++ b/src/chemistry/EAGLE/chemistry.h
@@ -60,7 +60,17 @@ chemistry_get_element_name(enum chemistry_element elem) {
  * @param cd #chemistry_global_data containing chemistry informations.
  */
 __attribute__((always_inline)) INLINE static void chemistry_init_part(
-    struct part* restrict p, const struct chemistry_global_data* cd) {}
+    struct part* restrict p, const struct chemistry_global_data* cd) {
+
+  struct chemistry_part_data* cpd = &p->chemistry_data;
+
+  for (int i = 0; i < chemistry_element_count; i++) {
+    cpd->smoothed_metal_mass_fraction[i] = 0.f;
+  }
+
+  cpd->smoothed_metal_mass_fraction_total = 0.f;
+  cpd->smoothed_iron_mass_fraction_from_SNIa = 0.f;
+}
 
 /**
  * @brief Finishes the smooth metal calculation.
@@ -76,7 +86,35 @@ __attribute__((always_inline)) INLINE static void chemistry_init_part(
  */
 __attribute__((always_inline)) INLINE static void chemistry_end_density(
     struct part* restrict p, const struct chemistry_global_data* cd,
-    const struct cosmology* cosmo) {}
+    const struct cosmology* cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float factor = pow_dimension(h_inv) / p->rho; /* 1 / (h^d * rho) */
+  const float m = hydro_get_mass(p);
+
+  struct chemistry_part_data* cpd = &p->chemistry_data;
+
+  for (int i = 0; i < chemistry_element_count; i++) {
+    /* Final operation on the density (add self-contribution). */
+    cpd->smoothed_metal_mass_fraction[i] +=
+        m * cpd->metal_mass_fraction[i] * kernel_root;
+
+    /* Finish the calculation by inserting the missing h-factors */
+    cpd->smoothed_metal_mass_fraction[i] *= factor;
+  }
+
+  /* Smooth mass fraction of all metals */
+  cpd->smoothed_metal_mass_fraction_total +=
+      m * cpd->metal_mass_fraction_total * kernel_root;
+  cpd->smoothed_metal_mass_fraction_total *= factor;
+
+  /* Smooth iron mass fraction from SNIa */
+  cpd->smoothed_iron_mass_fraction_from_SNIa +=
+      m * cpd->iron_mass_fraction_from_SNIa * kernel_root;
+  cpd->smoothed_iron_mass_fraction_from_SNIa *= factor;
+}
 
 /**
  * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
@@ -91,7 +129,21 @@ chemistry_part_has_no_neighbours(struct part* restrict p,
                                  struct xpart* restrict xp,
                                  const struct chemistry_global_data* cd,
                                  const struct cosmology* cosmo) {
-  error("Needs implementing!");
+
+  /* Just make all the smoothed fields default to the un-smoothed values */
+  struct chemistry_part_data* cpd = &p->chemistry_data;
+
+  /* Total metal mass fraction */
+  cpd->smoothed_metal_mass_fraction_total = cpd->metal_mass_fraction_total;
+
+  /* Iron frac from SNIa */
+  cpd->smoothed_iron_mass_fraction_from_SNIa =
+      cpd->iron_mass_fraction_from_SNIa;
+
+  /* Individual metal mass fractions */
+  for (int i = 0; i < chemistry_element_count; i++) {
+    cpd->smoothed_metal_mass_fraction[i] = cpd->metal_mass_fraction[i];
+  }
 }
 
 /**
@@ -112,11 +164,16 @@ __attribute__((always_inline)) INLINE static void chemistry_first_init_part(
     const struct chemistry_global_data* data, struct part* restrict p,
     struct xpart* restrict xp) {
 
-  p->chemistry_data.metal_mass_fraction_total =
-      data->initial_metal_mass_fraction_total;
-  for (int elem = 0; elem < chemistry_element_count; ++elem)
-    p->chemistry_data.metal_mass_fraction[elem] =
-        data->initial_metal_mass_fraction[elem];
+  /* Only overwrite the metallicities if initial values were read from the
+     parameter file (the total defaults to the -1 sentinel otherwise). */
+  if (data->initial_metal_mass_fraction_total != -1) {
+    p->chemistry_data.metal_mass_fraction_total =
+        data->initial_metal_mass_fraction_total;
+
+    for (int elem = 0; elem < chemistry_element_count; ++elem)
+      p->chemistry_data.metal_mass_fraction[elem] =
+          data->initial_metal_mass_fraction[elem];
+  }
+  chemistry_init_part(p, data);
 }
 
 /**
@@ -133,24 +190,20 @@ static INLINE void chemistry_init_backend(struct swift_params* parameter_file,
                                           struct chemistry_global_data* data) {
 
   /* Read the total metallicity */
-  data->initial_metal_mass_fraction_total =
-      parser_get_param_float(parameter_file, "EAGLEChemistry:InitMetallicity");
-
-  /* Read the individual mass fractions */
-  for (int elem = 0; elem < chemistry_element_count; ++elem) {
-    char buffer[50];
-    sprintf(buffer, "EAGLEChemistry:InitAbundance_%s",
-            chemistry_get_element_name((enum chemistry_element)elem));
-
-    data->initial_metal_mass_fraction[elem] =
-        parser_get_param_float(parameter_file, buffer);
+  data->initial_metal_mass_fraction_total = parser_get_opt_param_float(
+      parameter_file, "EAGLEChemistry:init_abundance_metal", -1);
+
+  if (data->initial_metal_mass_fraction_total != -1) {
+    /* Read the individual mass fractions */
+    for (int elem = 0; elem < chemistry_element_count; ++elem) {
+      char buffer[50];
+      sprintf(buffer, "EAGLEChemistry:init_abundance_%s",
+              chemistry_get_element_name((enum chemistry_element)elem));
+
+      data->initial_metal_mass_fraction[elem] =
+          parser_get_param_float(parameter_file, buffer);
+    }
   }
-
-  /* Read the constant ratios */
-  data->calcium_over_silicon_ratio = parser_get_param_float(
-      parameter_file, "EAGLEChemistry:CalciumOverSilicon");
-  data->sulphur_over_silicon_ratio = parser_get_param_float(
-      parameter_file, "EAGLEChemistry:SulphurOverSilicon");
 }
 
 /**
diff --git a/src/chemistry/EAGLE/chemistry_iact.h b/src/chemistry/EAGLE/chemistry_iact.h
index bdbb8ac9bf7d260e29468b8bee0a84416b668d6a..573291637d66e39a973e662edec084d3a4687050 100644
--- a/src/chemistry/EAGLE/chemistry_iact.h
+++ b/src/chemistry/EAGLE/chemistry_iact.h
@@ -39,7 +39,49 @@
  */
 __attribute__((always_inline)) INLINE static void runner_iact_chemistry(
     float r2, const float *dx, float hi, float hj, struct part *restrict pi,
-    struct part *restrict pj, float a, float H) {}
+    struct part *restrict pj, float a, float H) {
+
+  struct chemistry_part_data *chi = &pi->chemistry_data;
+  struct chemistry_part_data *chj = &pj->chemistry_data;
+
+  float wi;
+  float wj;
+
+  /* Get the masses. */
+  const float mi = hydro_get_mass(pi);
+  const float mj = hydro_get_mass(pj);
+
+  /* Get r */
+  const float r = sqrtf(r2);
+
+  /* Compute the kernel function for pi */
+  const float ui = r / hi;
+  kernel_eval(ui, &wi);
+
+  /* Compute the kernel function for pj */
+  const float uj = r / hj;
+  kernel_eval(uj, &wj);
+
+  /* Compute contribution to the smooth metallicity */
+  for (int i = 0; i < chemistry_element_count; i++) {
+    chi->smoothed_metal_mass_fraction[i] +=
+        mj * chj->metal_mass_fraction[i] * wi;
+    chj->smoothed_metal_mass_fraction[i] +=
+        mi * chi->metal_mass_fraction[i] * wj;
+  }
+
+  /* Smooth metal mass fraction of all metals */
+  chi->smoothed_metal_mass_fraction_total +=
+      mj * chj->metal_mass_fraction_total * wi;
+  chj->smoothed_metal_mass_fraction_total +=
+      mi * chi->metal_mass_fraction_total * wj;
+
+  /* Smooth iron mass fraction from SNIa */
+  chi->smoothed_iron_mass_fraction_from_SNIa +=
+      mj * chj->iron_mass_fraction_from_SNIa * wi;
+  chj->smoothed_iron_mass_fraction_from_SNIa +=
+      mi * chi->iron_mass_fraction_from_SNIa * wj;
+}
 
 /**
  * @brief do chemistry computation after the runner_iact_density (non symmetric
@@ -56,6 +98,36 @@ __attribute__((always_inline)) INLINE static void runner_iact_chemistry(
  */
 __attribute__((always_inline)) INLINE static void runner_iact_nonsym_chemistry(
     float r2, const float *dx, float hi, float hj, struct part *restrict pi,
-    const struct part *restrict pj, float a, float H) {}
+    const struct part *restrict pj, float a, float H) {
+
+  struct chemistry_part_data *chi = &pi->chemistry_data;
+  const struct chemistry_part_data *chj = &pj->chemistry_data;
+
+  float wi;
+
+  /* Get the masses. */
+  const float mj = hydro_get_mass(pj);
+
+  /* Get r */
+  const float r = sqrtf(r2);
+
+  /* Compute the kernel function for pi */
+  const float ui = r / hi;
+  kernel_eval(ui, &wi);
+
+  /* Compute contribution to the smooth metallicity */
+  for (int i = 0; i < chemistry_element_count; i++) {
+    chi->smoothed_metal_mass_fraction[i] +=
+        mj * chj->metal_mass_fraction[i] * wi;
+  }
+
+  /* Smooth metal mass fraction of all metals */
+  chi->smoothed_metal_mass_fraction_total +=
+      mj * chj->metal_mass_fraction_total * wi;
+
+  /* Smooth iron mass fraction from SNIa */
+  chi->smoothed_iron_mass_fraction_from_SNIa +=
+      mj * chj->iron_mass_fraction_from_SNIa * wi;
+}
 
 #endif /* SWIFT_EAGLE_CHEMISTRY_IACT_H */
diff --git a/src/chemistry/EAGLE/chemistry_io.h b/src/chemistry/EAGLE/chemistry_io.h
index d78a5f19a52e92426d5eb1f8575abe2b564e32ac..62de85d47329a588d658cc61db72510337988638 100644
--- a/src/chemistry/EAGLE/chemistry_io.h
+++ b/src/chemistry/EAGLE/chemistry_io.h
@@ -32,9 +32,18 @@
  */
 INLINE static int chemistry_read_particles(struct part* parts,
                                            struct io_props* list) {
-
-  /* Nothing to read */
-  return 0;
+  /* List what we want to read */
+  list[0] = io_make_input_field(
+      "ElementAbundance", FLOAT, chemistry_element_count, OPTIONAL,
+      UNIT_CONV_NO_UNITS, parts, chemistry_data.metal_mass_fraction);
+  list[1] =
+      io_make_input_field("Metallicity", FLOAT, 1, OPTIONAL, UNIT_CONV_NO_UNITS,
+                          parts, chemistry_data.metal_mass_fraction_total);
+  list[2] = io_make_input_field("IronMassFracFromSNIa", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_NO_UNITS, parts,
+                                chemistry_data.iron_mass_fraction_from_SNIa);
+
+  return 3;
 }
 
 /**
@@ -97,6 +106,66 @@ INLINE static int chemistry_write_particles(const struct part* parts,
   return 12;
 }
 
+/**
+ * @brief Specifies which star particle fields to write to a dataset
+ *
+ * @param sparts The star particle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int chemistry_write_sparticles(const struct spart* sparts,
+                                             struct io_props* list) {
+
+  /* List what we want to write */
+  list[0] = io_make_output_field("ElementAbundance", FLOAT,
+                                 chemistry_element_count, UNIT_CONV_NO_UNITS,
+                                 sparts, chemistry_data.metal_mass_fraction);
+
+  list[1] = io_make_output_field(
+      "SmoothedElementAbundance", FLOAT, chemistry_element_count,
+      UNIT_CONV_NO_UNITS, sparts, chemistry_data.smoothed_metal_mass_fraction);
+
+  list[2] =
+      io_make_output_field("Metallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts,
+                           chemistry_data.metal_mass_fraction_total);
+
+  list[3] = io_make_output_field(
+      "SmoothedMetallicity", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts,
+      chemistry_data.smoothed_metal_mass_fraction_total);
+
+  list[4] = io_make_output_field("TotalMassFromSNIa", FLOAT, 1, UNIT_CONV_MASS,
+                                 sparts, chemistry_data.mass_from_SNIa);
+
+  list[5] = io_make_output_field("MetalMassFracFromSNIa", FLOAT, 1,
+                                 UNIT_CONV_NO_UNITS, sparts,
+                                 chemistry_data.metal_mass_fraction_from_SNIa);
+
+  list[6] = io_make_output_field("TotalMassFromAGB", FLOAT, 1, UNIT_CONV_MASS,
+                                 sparts, chemistry_data.mass_from_AGB);
+
+  list[7] =
+      io_make_output_field("MetalMassFracFromAGB", FLOAT, 1, UNIT_CONV_NO_UNITS,
+                           sparts, chemistry_data.metal_mass_fraction_from_AGB);
+
+  list[8] = io_make_output_field("TotalMassFromSNII", FLOAT, 1, UNIT_CONV_MASS,
+                                 sparts, chemistry_data.mass_from_SNII);
+
+  list[9] = io_make_output_field("MetalMassFracFromSNII", FLOAT, 1,
+                                 UNIT_CONV_NO_UNITS, sparts,
+                                 chemistry_data.metal_mass_fraction_from_SNII);
+
+  list[10] =
+      io_make_output_field("IronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS,
+                           sparts, chemistry_data.iron_mass_fraction_from_SNIa);
+
+  list[11] = io_make_output_field(
+      "SmoothedIronMassFracFromSNIa", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts,
+      chemistry_data.smoothed_iron_mass_fraction_from_SNIa);
+
+  return 12;
+}
+
 #ifdef HAVE_HDF5
 
 /**
diff --git a/src/chemistry/EAGLE/chemistry_struct.h b/src/chemistry/EAGLE/chemistry_struct.h
index 9093709e62d0af638ae485bd1154a4537791e84a..f5e47347f8b6a910640624ddfc0b5968242eedf6 100644
--- a/src/chemistry/EAGLE/chemistry_struct.h
+++ b/src/chemistry/EAGLE/chemistry_struct.h
@@ -45,12 +45,6 @@ struct chemistry_global_data {
 
   /*! Fraction of the particle mass in *all* metals at the start of the run */
   float initial_metal_mass_fraction_total;
-
-  /*! Constant ratio of Calcium over Silicium */
-  float calcium_over_silicon_ratio;
-
-  /*! Constant ratio of Calcium over Silicium */
-  float sulphur_over_silicon_ratio;
 };
 
 /**
diff --git a/src/chemistry/GEAR/chemistry_io.h b/src/chemistry/GEAR/chemistry_io.h
index 2a0847bebfb8c1734f21bda2f6ad55b354a7aec9..b29f7db65d3ab7dce4b0dcafed06c97a9e621bfe 100644
--- a/src/chemistry/GEAR/chemistry_io.h
+++ b/src/chemistry/GEAR/chemistry_io.h
@@ -87,6 +87,32 @@ INLINE static int chemistry_write_particles(const struct part* parts,
   return 3;
 }
 
+/**
+ * @brief Specifies which sparticle fields to write to a dataset
+ *
+ * @param sparts The sparticle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int chemistry_write_sparticles(const struct spart* sparts,
+                                             struct io_props* list) {
+
+  /* List what we want to write */
+  list[0] = io_make_output_field(
+      "SmoothedElementAbundance", FLOAT, chemistry_element_count,
+      UNIT_CONV_NO_UNITS, sparts, chemistry_data.smoothed_metal_mass_fraction);
+
+  list[1] = io_make_output_field("Z", FLOAT, 1, UNIT_CONV_NO_UNITS, sparts,
+                                 chemistry_data.Z);
+
+  list[2] = io_make_output_field("ElementAbundance", FLOAT,
+                                 chemistry_element_count, UNIT_CONV_NO_UNITS,
+                                 sparts, chemistry_data.metal_mass_fraction);
+
+  return 3;
+}
+
 #ifdef HAVE_HDF5
 
 /**
diff --git a/src/chemistry/none/chemistry_io.h b/src/chemistry/none/chemistry_io.h
index ef7e0d8d87dfeab5978f0e86bbf6279f7901d10a..c6e5b7b769cec667fee8c3fc674f3b4aa35929c1 100644
--- a/src/chemistry/none/chemistry_io.h
+++ b/src/chemistry/none/chemistry_io.h
@@ -55,10 +55,27 @@ INLINE static int chemistry_write_particles(const struct part* parts,
   return 0;
 }
 
+/**
+ * @brief Specifies which sparticle fields to write to a dataset
+ *
+ * @param sparts The sparticle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+INLINE static int chemistry_write_sparticles(const struct spart* sparts,
+                                             struct io_props* list) {
+
+  /* Nothing to write for the 'none' chemistry model */
+
+  /* Return the number of fields to write */
+  return 0;
+}
+
 #ifdef HAVE_HDF5
 
 /**
- * @brief Writes the current model of SPH to the file
+ * @brief Writes the current model of chemistry to the file
  * @param h_grp The HDF5 group in which to write
  */
 INLINE static void chemistry_write_flavour(hid_t h_grp) {
diff --git a/src/clocks.c b/src/clocks.c
index cac0131acade08e41ee7ed4a22fabde49e197060..49297f5db1cc10a3d9f4537c5900610dded7ffba 100644
--- a/src/clocks.c
+++ b/src/clocks.c
@@ -29,6 +29,7 @@
 #include "../config.h"
 
 /* Standard headers. */
+#include <limits.h>
 #include <stdio.h>
 #include <unistd.h>
 
@@ -262,6 +263,17 @@ const char *clocks_get_timesincestart(void) {
   return buffer;
 }
 
+/**
+ * Returns the wall-clock time since the start of execution in hours.
+ *
+ * Need to call clocks_set_cpufreq() to mark the start of execution.
+ *
+ * @result the time since the start of the execution
+ */
+double clocks_get_hours_since_start(void) {
+  return clocks_diff_ticks(getticks(), clocks_start) / (3600. * 1000.0);
+}
+
 /**
  * @brief return the cpu time used.
  *
@@ -280,3 +292,23 @@ double clocks_get_cputime_used(void) {
   times(&tmstic);
   return (double)(tmstic.tms_utime + tmstic.tms_cutime);
 }
+
+/**
+ * @brief Return an integer based on the current time.
+ *
+ * The value is the current number of nanoseconds modulo INT_MAX, so
+ * successive calls yield similar most-significant figures unless they
+ * are separated by more than INT_MAX nanoseconds. Falls back to the
+ * CPU tick counter when clock_gettime() is unavailable.
+ *
+ * @result an integer.
+ */
+int clocks_random_seed(void) {
+#ifdef HAVE_CLOCK_GETTIME
+  struct timespec timespec;
+  clock_gettime(CLOCK_REALTIME, &timespec);
+  return (timespec.tv_nsec % INT_MAX);
+#else
+  return (getticks() % INT_MAX);
+#endif
+}
diff --git a/src/clocks.h b/src/clocks.h
index f3901584774c7586d6a68b4415d6b443cb53c466..ce08167bd504d47a76542870791057881c6d2f17 100644
--- a/src/clocks.h
+++ b/src/clocks.h
@@ -19,8 +19,13 @@
 #ifndef SWIFT_CLOCKS_H
 #define SWIFT_CLOCKS_H
 
+/* Config parameters. */
+#include "../config.h"
+
+/* System includes. */
 #include <sys/times.h>
-#include <time.h>
+
+/* Local includes */
 #include "cycle.h"
 
 /* Struct to record a time for the clocks functions. */
@@ -42,7 +47,9 @@ double clocks_from_ticks(ticks tics);
 ticks clocks_to_ticks(double interval);
 double clocks_diff_ticks(ticks tic, ticks toc);
 const char *clocks_get_timesincestart(void);
+double clocks_get_hours_since_start(void);
 
 double clocks_get_cputime_used(void);
+int clocks_random_seed(void);
 
 #endif /* SWIFT_CLOCKS_H */
diff --git a/src/collectgroup.c b/src/collectgroup.c
index c83d7bef3f03e672e8b5c9036e5daaab26b5d190..ddf3e35d945fd8b07cc927d8ba383963c7558cd2 100644
--- a/src/collectgroup.c
+++ b/src/collectgroup.c
@@ -36,10 +36,14 @@
 
 /* Local collections for MPI reduces. */
 struct mpicollectgroup1 {
-  long long updates, g_updates, s_updates;
+  long long updated, g_updated, s_updated;
+  long long inhibited, g_inhibited, s_inhibited;
   integertime_t ti_hydro_end_min;
   integertime_t ti_gravity_end_min;
   int forcerebuild;
+  long long total_nr_cells;
+  long long total_nr_tasks;
+  float tasks_per_cell_max;
 };
 
 /* Forward declarations. */
@@ -85,45 +89,64 @@ void collectgroup1_apply(struct collectgroup1 *grp1, struct engine *e) {
   e->ti_end_min = min(e->ti_hydro_end_min, e->ti_gravity_end_min);
   e->ti_end_max = max(e->ti_hydro_end_max, e->ti_gravity_end_max);
   e->ti_beg_max = max(e->ti_hydro_beg_max, e->ti_gravity_beg_max);
-  e->updates = grp1->updates;
-  e->g_updates = grp1->g_updates;
-  e->s_updates = grp1->s_updates;
+  e->updates = grp1->updated;
+  e->g_updates = grp1->g_updated;
+  e->s_updates = grp1->s_updated;
+  e->nr_inhibited_parts = grp1->inhibited;
+  e->nr_inhibited_gparts = grp1->g_inhibited;
+  e->nr_inhibited_sparts = grp1->s_inhibited;
   e->forcerebuild = grp1->forcerebuild;
+  e->total_nr_cells = grp1->total_nr_cells;
+  e->total_nr_tasks = grp1->total_nr_tasks;
+  e->tasks_per_cell_max = grp1->tasks_per_cell_max;
 }
 
 /**
  * @brief Initialises a collectgroup1 struct ready for processing.
  *
  * @param grp1 The #collectgroup1 to initialise
- * @param updates the number of updated hydro particles on this node this step.
- * @param g_updates the number of updated gravity particles on this node this
- * step.
- * @param s_updates the number of updated star particles on this node this step.
+ * @param updated the number of updated hydro particles on this node this step.
+ * @param g_updated the number of updated gravity particles on this node this
+ *                  step.
+ * @param s_updated the number of updated star particles on this node this step.
+ * @param inhibited the number of inhibited hydro particles on this node this
+ *                  step.
+ * @param g_inhibited the number of inhibited gravity particles on this node
+ *                    this step.
+ * @param s_inhibited the number of inhibited star particles on this node this
+ *                    step.
  * @param ti_hydro_end_min the minimum end time for next hydro time step after
- * this step.
+ *                         this step.
  * @param ti_hydro_end_max the maximum end time for next hydro time step after
- * this step.
+ *                         this step.
  * @param ti_hydro_beg_max the maximum begin time for next hydro time step after
- * this step.
+ *                         this step.
  * @param ti_gravity_end_min the minimum end time for next gravity time step
- * after this step.
+ *                           after this step.
  * @param ti_gravity_end_max the maximum end time for next gravity time step
- * after this step.
+ *                           after this step.
  * @param ti_gravity_beg_max the maximum begin time for next gravity time step
- * after this step.
+ *                           after this step.
  * @param forcerebuild whether a rebuild is required after this step.
+ * @param total_nr_cells total number of all cells on rank.
+ * @param total_nr_tasks total number of tasks on rank.
+ * @param tasks_per_cell the used number of tasks per cell.
  */
-void collectgroup1_init(struct collectgroup1 *grp1, size_t updates,
-                        size_t g_updates, size_t s_updates,
-                        integertime_t ti_hydro_end_min,
-                        integertime_t ti_hydro_end_max,
-                        integertime_t ti_hydro_beg_max,
-                        integertime_t ti_gravity_end_min,
-                        integertime_t ti_gravity_end_max,
-                        integertime_t ti_gravity_beg_max, int forcerebuild) {
-  grp1->updates = updates;
-  grp1->g_updates = g_updates;
-  grp1->s_updates = s_updates;
+void collectgroup1_init(
+    struct collectgroup1 *grp1, size_t updated, size_t g_updated,
+    size_t s_updated, size_t inhibited, size_t g_inhibited, size_t s_inhibited,
+    integertime_t ti_hydro_end_min, integertime_t ti_hydro_end_max,
+    integertime_t ti_hydro_beg_max, integertime_t ti_gravity_end_min,
+    integertime_t ti_gravity_end_max, integertime_t ti_gravity_beg_max,
+    int forcerebuild, long long total_nr_cells, long long total_nr_tasks,
+    float tasks_per_cell) {
+
+  grp1->updated = updated;
+  grp1->g_updated = g_updated;
+  grp1->s_updated = s_updated;
+  grp1->inhibited = inhibited;
+  grp1->g_inhibited = g_inhibited;
+  grp1->s_inhibited = s_inhibited;
   grp1->ti_hydro_end_min = ti_hydro_end_min;
   grp1->ti_hydro_end_max = ti_hydro_end_max;
   grp1->ti_hydro_beg_max = ti_hydro_beg_max;
@@ -131,6 +154,9 @@ void collectgroup1_init(struct collectgroup1 *grp1, size_t updates,
   grp1->ti_gravity_end_max = ti_gravity_end_max;
   grp1->ti_gravity_beg_max = ti_gravity_beg_max;
   grp1->forcerebuild = forcerebuild;
+  grp1->total_nr_cells = total_nr_cells;
+  grp1->total_nr_tasks = total_nr_tasks;
+  grp1->tasks_per_cell_max = tasks_per_cell;
 }
 
 /**
@@ -147,12 +173,18 @@ void collectgroup1_reduce(struct collectgroup1 *grp1) {
 
   /* Populate an MPI group struct and reduce this across all nodes. */
   struct mpicollectgroup1 mpigrp11;
-  mpigrp11.updates = grp1->updates;
-  mpigrp11.g_updates = grp1->g_updates;
-  mpigrp11.s_updates = grp1->s_updates;
+  mpigrp11.updated = grp1->updated;
+  mpigrp11.g_updated = grp1->g_updated;
+  mpigrp11.s_updated = grp1->s_updated;
+  mpigrp11.inhibited = grp1->inhibited;
+  mpigrp11.g_inhibited = grp1->g_inhibited;
+  mpigrp11.s_inhibited = grp1->s_inhibited;
   mpigrp11.ti_hydro_end_min = grp1->ti_hydro_end_min;
   mpigrp11.ti_gravity_end_min = grp1->ti_gravity_end_min;
   mpigrp11.forcerebuild = grp1->forcerebuild;
+  mpigrp11.total_nr_cells = grp1->total_nr_cells;
+  mpigrp11.total_nr_tasks = grp1->total_nr_tasks;
+  mpigrp11.tasks_per_cell_max = grp1->tasks_per_cell_max;
 
   struct mpicollectgroup1 mpigrp12;
   if (MPI_Allreduce(&mpigrp11, &mpigrp12, 1, mpicollectgroup1_type,
@@ -160,12 +192,18 @@ void collectgroup1_reduce(struct collectgroup1 *grp1) {
     error("Failed to reduce mpicollection1.");
 
   /* And update. */
-  grp1->updates = mpigrp12.updates;
-  grp1->g_updates = mpigrp12.g_updates;
-  grp1->s_updates = mpigrp12.s_updates;
+  grp1->updated = mpigrp12.updated;
+  grp1->g_updated = mpigrp12.g_updated;
+  grp1->s_updated = mpigrp12.s_updated;
+  grp1->inhibited = mpigrp12.inhibited;
+  grp1->g_inhibited = mpigrp12.g_inhibited;
+  grp1->s_inhibited = mpigrp12.s_inhibited;
   grp1->ti_hydro_end_min = mpigrp12.ti_hydro_end_min;
   grp1->ti_gravity_end_min = mpigrp12.ti_gravity_end_min;
   grp1->forcerebuild = mpigrp12.forcerebuild;
+  grp1->total_nr_cells = mpigrp12.total_nr_cells;
+  grp1->total_nr_tasks = mpigrp12.total_nr_tasks;
+  grp1->tasks_per_cell_max = mpigrp12.tasks_per_cell_max;
 
 #endif
 }
@@ -182,9 +220,14 @@ static void doreduce1(struct mpicollectgroup1 *mpigrp11,
 
   /* Do what is needed for each part of the collection. */
   /* Sum of updates. */
-  mpigrp11->updates += mpigrp12->updates;
-  mpigrp11->g_updates += mpigrp12->g_updates;
-  mpigrp11->s_updates += mpigrp12->s_updates;
+  mpigrp11->updated += mpigrp12->updated;
+  mpigrp11->g_updated += mpigrp12->g_updated;
+  mpigrp11->s_updated += mpigrp12->s_updated;
+
+  /* Sum of inhibited */
+  mpigrp11->inhibited += mpigrp12->inhibited;
+  mpigrp11->g_inhibited += mpigrp12->g_inhibited;
+  mpigrp11->s_inhibited += mpigrp12->s_inhibited;
 
   /* Minimum end time. */
   mpigrp11->ti_hydro_end_min =
@@ -195,6 +238,14 @@ static void doreduce1(struct mpicollectgroup1 *mpigrp11,
   /* Everyone must agree to not rebuild. */
   if (mpigrp11->forcerebuild || mpigrp12->forcerebuild)
     mpigrp11->forcerebuild = 1;
+
+  /* Totals of all tasks and cells. */
+  mpigrp11->total_nr_cells += mpigrp12->total_nr_cells;
+  mpigrp11->total_nr_tasks += mpigrp12->total_nr_tasks;
+
+  /* Maximum value of tasks_per_cell. */
+  mpigrp11->tasks_per_cell_max =
+      max(mpigrp11->tasks_per_cell_max, mpigrp12->tasks_per_cell_max);
 }
 
 /**
@@ -204,7 +255,7 @@ static void mpicollectgroup1_reduce(void *in, void *inout, int *len,
                                     MPI_Datatype *datatype) {
 
   for (int i = 0; i < *len; ++i)
-    doreduce1(&((struct mpicollectgroup1 *)inout)[0],
+    doreduce1(&((struct mpicollectgroup1 *)inout)[i],
               &((const struct mpicollectgroup1 *)in)[i]);
 }
 
diff --git a/src/collectgroup.h b/src/collectgroup.h
index 8bf8a9d1b75f9a5ddb3f19fa9cdb4103e044ea59..3e430b58db05b563f96149d1ae21039444a03640 100644
--- a/src/collectgroup.h
+++ b/src/collectgroup.h
@@ -35,7 +35,10 @@ struct engine;
 struct collectgroup1 {
 
   /* Number of particles updated */
-  long long updates, g_updates, s_updates;
+  long long updated, g_updated, s_updated;
+
+  /* Number of particles inhibited */
+  long long inhibited, g_inhibited, s_inhibited;
 
   /* Times for the time-step */
   integertime_t ti_hydro_end_min, ti_hydro_end_max, ti_hydro_beg_max;
@@ -43,18 +46,25 @@ struct collectgroup1 {
 
   /* Force the engine to rebuild? */
   int forcerebuild;
+
+  /* Totals of cells and tasks. */
+  long long total_nr_cells;
+  long long total_nr_tasks;
+
+  /* Maximum value of actual tasks per cell across all ranks. */
+  float tasks_per_cell_max;
 };
 
 void collectgroup_init(void);
 void collectgroup1_apply(struct collectgroup1 *grp1, struct engine *e);
-void collectgroup1_init(struct collectgroup1 *grp1, size_t updates,
-                        size_t g_updates, size_t s_updates,
-                        integertime_t ti_hydro_end_min,
-                        integertime_t ti_hydro_end_max,
-                        integertime_t ti_hydro_beg_max,
-                        integertime_t ti_gravity_end_min,
-                        integertime_t ti_gravity_end_max,
-                        integertime_t ti_gravity_beg_max, int forcerebuild);
+void collectgroup1_init(
+    struct collectgroup1 *grp1, size_t updated, size_t g_updated,
+    size_t s_updated, size_t inhibited, size_t g_inhibited, size_t s_inhibited,
+    integertime_t ti_hydro_end_min, integertime_t ti_hydro_end_max,
+    integertime_t ti_hydro_beg_max, integertime_t ti_gravity_end_min,
+    integertime_t ti_gravity_end_max, integertime_t ti_gravity_beg_max,
+    int forcerebuild, long long total_nr_cells, long long total_nr_tasks,
+    float tasks_per_cell);
 void collectgroup1_reduce(struct collectgroup1 *grp1);
 
 #endif /* SWIFT_COLLECTGROUP_H */
diff --git a/src/common_io.c b/src/common_io.c
index 68311107575a89ce8a2990a8e0f7a8eeb5d2d644..733cf1dacac5f0c73ea401a584e2aa40eadd4a23 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -142,7 +142,7 @@ void io_read_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type,
  * Calls #error() if an error occurs.
  */
 void io_write_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type,
-                        void* data, int num) {
+                        const void* data, int num) {
 
   const hid_t h_space = H5Screate(H5S_SIMPLE);
   if (h_space < 0)
@@ -358,6 +358,10 @@ void io_write_code_description(hid_t h_file) {
 #ifdef HAVE_METIS
   io_write_attribute_s(h_grpcode, "METIS library version", metis_version());
 #endif
+#ifdef HAVE_PARMETIS
+  io_write_attribute_s(h_grpcode, "ParMETIS library version",
+                       parmetis_version());
+#endif
 #else
   io_write_attribute_s(h_grpcode, "MPI library", "Non-MPI version of SWIFT");
 #endif
@@ -374,7 +378,7 @@ void io_write_engine_policy(hid_t h_file, const struct engine* e) {
   const hid_t h_grp = H5Gcreate1(h_file, "/Policy", 0);
   if (h_grp < 0) error("Error while creating policy group");
 
-  for (int i = 1; i <= engine_maxpolicy; ++i)
+  for (int i = 1; i < engine_maxpolicy; ++i)
     if (e->policy & (1 << i))
       io_write_attribute_i(h_grp, engine_policy_names[i + 1], 1);
     else
@@ -383,6 +387,332 @@ void io_write_engine_policy(hid_t h_file, const struct engine* e) {
   H5Gclose(h_grp);
 }
 
+void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
+                           const struct cell* cells_top, const int nr_cells,
+                           const double width[3], const int nodeID,
+                           const long long global_counts[swift_type_count],
+                           const long long global_offsets[swift_type_count],
+                           const struct unit_system* internal_units,
+                           const struct unit_system* snapshot_units) {
+
+  double cell_width[3] = {width[0], width[1], width[2]};
+
+  /* Temporary memory for the cell-by-cell information */
+  double* centres = NULL;
+  centres = (double*)malloc(3 * nr_cells * sizeof(double));
+
+  /* Count of particles in each cell */
+  long long *count_part = NULL, *count_gpart = NULL, *count_spart = NULL;
+  count_part = (long long*)malloc(nr_cells * sizeof(long long));
+  count_gpart = (long long*)malloc(nr_cells * sizeof(long long));
+  count_spart = (long long*)malloc(nr_cells * sizeof(long long));
+
+  /* Global offsets of particles in each cell */
+  long long *offset_part = NULL, *offset_gpart = NULL, *offset_spart = NULL;
+  offset_part = (long long*)malloc(nr_cells * sizeof(long long));
+  offset_gpart = (long long*)malloc(nr_cells * sizeof(long long));
+  offset_spart = (long long*)malloc(nr_cells * sizeof(long long));
+
+  /* Offsets of the 0^th element */
+  offset_part[0] = 0;
+  offset_gpart[0] = 0;
+  offset_spart[0] = 0;
+
+  /* Collect the cell information of *local* cells */
+  long long local_offset_part = 0;
+  long long local_offset_gpart = 0;
+  long long local_offset_spart = 0;
+  for (int i = 0; i < nr_cells; ++i) {
+
+    if (cells_top[i].nodeID == nodeID) {
+
+      /* Centre of each cell */
+      centres[i * 3 + 0] = cells_top[i].loc[0] + cell_width[0] * 0.5;
+      centres[i * 3 + 1] = cells_top[i].loc[1] + cell_width[1] * 0.5;
+      centres[i * 3 + 2] = cells_top[i].loc[2] + cell_width[2] * 0.5;
+
+      /* Count real particles that will be written */
+      count_part[i] = cells_top[i].hydro.count - cells_top[i].hydro.inhibited;
+      count_gpart[i] = cells_top[i].grav.count - cells_top[i].grav.inhibited;
+      count_spart[i] = cells_top[i].stars.count - cells_top[i].stars.inhibited;
+
+      /* Only count DM gpart (gpart without friends) */
+      count_gpart[i] -= count_part[i];
+      count_gpart[i] -= count_spart[i];
+
+      /* Offsets including the global offset of all particles on this MPI rank
+       */
+      offset_part[i] = local_offset_part + global_offsets[swift_type_gas];
+      offset_gpart[i] =
+          local_offset_gpart + global_offsets[swift_type_dark_matter];
+      offset_spart[i] = local_offset_spart + global_offsets[swift_type_stars];
+
+      local_offset_part += count_part[i];
+      local_offset_gpart += count_gpart[i];
+      local_offset_spart += count_spart[i];
+
+    } else {
+
+      /* Just zero everything for the foregin cells */
+
+      centres[i * 3 + 0] = 0.;
+      centres[i * 3 + 1] = 0.;
+      centres[i * 3 + 2] = 0.;
+
+      count_part[i] = 0;
+      count_gpart[i] = 0;
+      count_spart[i] = 0;
+
+      offset_part[i] = 0;
+      offset_gpart[i] = 0;
+      offset_spart[i] = 0;
+    }
+  }
+
+#ifdef WITH_MPI
+  /* Now, reduce all the arrays. Note that we use a bit-wise OR here. This
+     is safe as we made sure only local cells have non-zero values. */
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, count_part, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(count_part, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, count_gpart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(count_gpart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, count_spart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(count_spart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, offset_part, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(offset_part, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, offset_gpart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(offset_gpart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, offset_spart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
+               0, MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(offset_spart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
+               MPI_COMM_WORLD);
+  }
+
+  /* For the centres we use a sum as MPI does not like bit-wise operations
+     on floating point numbers */
+  if (nodeID == 0) {
+    MPI_Reduce(MPI_IN_PLACE, centres, 3 * nr_cells, MPI_DOUBLE, MPI_SUM, 0,
+               MPI_COMM_WORLD);
+  } else {
+    MPI_Reduce(centres, NULL, 3 * nr_cells, MPI_DOUBLE, MPI_SUM, 0,
+               MPI_COMM_WORLD);
+  }
+#endif
+
+  /* Only rank 0 actually writes */
+  if (nodeID == 0) {
+
+    /* Unit conversion if necessary */
+    const double factor = units_conversion_factor(
+        internal_units, snapshot_units, UNIT_CONV_LENGTH);
+    if (factor != 1.) {
+
+      /* Convert the cell centres */
+      for (int i = 0; i < nr_cells; ++i) {
+        centres[i * 3 + 0] *= factor;
+        centres[i * 3 + 1] *= factor;
+        centres[i * 3 + 2] *= factor;
+      }
+
+      /* Convert the cell widths */
+      cell_width[0] *= factor;
+      cell_width[1] *= factor;
+      cell_width[2] *= factor;
+    }
+
+    /* Write some meta-information first */
+    hid_t h_subgrp =
+        H5Gcreate(h_grp, "Meta-data", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    if (h_subgrp < 0) error("Error while creating meta-data sub-group");
+    io_write_attribute(h_subgrp, "nr_cells", INT, &nr_cells, 1);
+    io_write_attribute(h_subgrp, "size", DOUBLE, cell_width, 3);
+    io_write_attribute(h_subgrp, "dimension", INT, cdim, 3);
+    H5Gclose(h_subgrp);
+
+    /* Write the centres to the group */
+    hsize_t shape[2] = {nr_cells, 3};
+    hid_t h_space = H5Screate(H5S_SIMPLE);
+    if (h_space < 0) error("Error while creating data space for cell centres");
+    hid_t h_err = H5Sset_extent_simple(h_space, 2, shape, shape);
+    if (h_err < 0)
+      error("Error while changing shape of gas offsets data space.");
+    hid_t h_data = H5Dcreate(h_grp, "Centres", io_hdf5_type(DOUBLE), h_space,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    if (h_data < 0) error("Error while creating dataspace for gas offsets.");
+    h_err = H5Dwrite(h_data, io_hdf5_type(DOUBLE), h_space, H5S_ALL,
+                     H5P_DEFAULT, centres);
+    if (h_err < 0) error("Error while writing centres.");
+    H5Dclose(h_data);
+    H5Sclose(h_space);
+
+    /* Group containing the offsets for each particle type */
+    h_subgrp =
+        H5Gcreate(h_grp, "Offsets", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    if (h_subgrp < 0) error("Error while creating offsets sub-group");
+
+    if (global_counts[swift_type_gas] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0) error("Error while creating data space for gas offsets");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of gas offsets data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType0", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for gas offsets.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, offset_part);
+      if (h_err < 0) error("Error while writing gas offsets.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    if (global_counts[swift_type_dark_matter] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0) error("Error while creating data space for DM offsets");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of DM offsets data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType1", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for DM offsets.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, offset_gpart);
+      if (h_err < 0) error("Error while writing DM offsets.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    if (global_counts[swift_type_stars] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0)
+        error("Error while creating data space for stars offsets");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of stars offsets data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType4", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for star offsets.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, offset_spart);
+      if (h_err < 0) error("Error while writing star offsets.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    H5Gclose(h_subgrp);
+
+    /* Group containing the counts for each particle type */
+    h_subgrp =
+        H5Gcreate(h_grp, "Counts", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    if (h_subgrp < 0) error("Error while creating counts sub-group");
+
+    if (global_counts[swift_type_gas] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0) error("Error while creating data space for gas counts");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of gas counts data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType0", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for gas counts.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, count_part);
+      if (h_err < 0) error("Error while writing gas counts.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    if (global_counts[swift_type_dark_matter] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0) error("Error while creating data space for DM counts");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of DM counts data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType1", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for DM counts.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, count_gpart);
+      if (h_err < 0) error("Error while writing DM counts.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    if (global_counts[swift_type_stars] > 0) {
+
+      shape[0] = nr_cells;
+      shape[1] = 1;
+      h_space = H5Screate(H5S_SIMPLE);
+      if (h_space < 0)
+        error("Error while creating data space for stars counts");
+      h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
+      if (h_err < 0)
+        error("Error while changing shape of stars counts data space.");
+      h_data = H5Dcreate(h_subgrp, "PartType4", io_hdf5_type(LONGLONG), h_space,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      if (h_data < 0) error("Error while creating dataspace for star counts.");
+      h_err = H5Dwrite(h_data, io_hdf5_type(LONGLONG), h_space, H5S_ALL,
+                       H5P_DEFAULT, count_spart);
+      if (h_err < 0) error("Error while writing star counts.");
+      H5Dclose(h_data);
+      H5Sclose(h_space);
+    }
+
+    H5Gclose(h_subgrp);
+  }
+
+  /* Free everything we allocated */
+  free(centres);
+  free(count_part);
+  free(count_gpart);
+  free(count_spart);
+  free(offset_part);
+  free(offset_gpart);
+  free(offset_spart);
+}
+
 #endif /* HAVE_HDF5 */
 
 /**
@@ -478,6 +808,28 @@ void io_convert_part_d_mapper(void* restrict temp, int N,
                          &temp_d[i * dim]);
 }
 
+/**
+ * @brief Mapper function to copy #part into a buffer of doubles using a
+ * conversion function.
+ */
+void io_convert_part_l_mapper(void* restrict temp, int N,
+                              void* restrict extra_data) {
+
+  const struct io_props props = *((const struct io_props*)extra_data);
+  const struct part* restrict parts = props.parts;
+  const struct xpart* restrict xparts = props.xparts;
+  const struct engine* e = props.e;
+  const size_t dim = props.dimension;
+
+  /* How far are we with this chunk? */
+  long long* restrict temp_l = (long long*)temp;
+  const ptrdiff_t delta = (temp_l - props.start_temp_l) / dim;
+
+  for (int i = 0; i < N; i++)
+    props.convert_part_l(e, parts + delta + i, xparts + delta + i,
+                         &temp_l[i * dim]);
+}
+
 /**
  * @brief Mapper function to copy #gpart into a buffer of floats using a
  * conversion function.
@@ -518,6 +870,86 @@ void io_convert_gpart_d_mapper(void* restrict temp, int N,
     props.convert_gpart_d(e, gparts + delta + i, &temp_d[i * dim]);
 }
 
+/**
+ * @brief Mapper function to copy #gpart into a buffer of doubles using a
+ * conversion function.
+ */
+void io_convert_gpart_l_mapper(void* restrict temp, int N,
+                               void* restrict extra_data) {
+
+  const struct io_props props = *((const struct io_props*)extra_data);
+  const struct gpart* restrict gparts = props.gparts;
+  const struct engine* e = props.e;
+  const size_t dim = props.dimension;
+
+  /* How far are we with this chunk? */
+  long long* restrict temp_l = (long long*)temp;
+  const ptrdiff_t delta = (temp_l - props.start_temp_l) / dim;
+
+  for (int i = 0; i < N; i++)
+    props.convert_gpart_l(e, gparts + delta + i, &temp_l[i * dim]);
+}
+
+/**
+ * @brief Mapper function to copy #spart into a buffer of floats using a
+ * conversion function.
+ */
+void io_convert_spart_f_mapper(void* restrict temp, int N,
+                               void* restrict extra_data) {
+
+  const struct io_props props = *((const struct io_props*)extra_data);
+  const struct spart* restrict sparts = props.sparts;
+  const struct engine* e = props.e;
+  const size_t dim = props.dimension;
+
+  /* How far are we with this chunk? */
+  float* restrict temp_f = (float*)temp;
+  const ptrdiff_t delta = (temp_f - props.start_temp_f) / dim;
+
+  for (int i = 0; i < N; i++)
+    props.convert_spart_f(e, sparts + delta + i, &temp_f[i * dim]);
+}
+
+/**
+ * @brief Mapper function to copy #spart into a buffer of doubles using a
+ * conversion function.
+ */
+void io_convert_spart_d_mapper(void* restrict temp, int N,
+                               void* restrict extra_data) {
+
+  const struct io_props props = *((const struct io_props*)extra_data);
+  const struct spart* restrict sparts = props.sparts;
+  const struct engine* e = props.e;
+  const size_t dim = props.dimension;
+
+  /* How far are we with this chunk? */
+  double* restrict temp_d = (double*)temp;
+  const ptrdiff_t delta = (temp_d - props.start_temp_d) / dim;
+
+  for (int i = 0; i < N; i++)
+    props.convert_spart_d(e, sparts + delta + i, &temp_d[i * dim]);
+}
+
+/**
+ * @brief Mapper function to copy #spart into a buffer of doubles using a
+ * conversion function.
+ */
+void io_convert_spart_l_mapper(void* restrict temp, int N,
+                               void* restrict extra_data) {
+
+  const struct io_props props = *((const struct io_props*)extra_data);
+  const struct spart* restrict sparts = props.sparts;
+  const struct engine* e = props.e;
+  const size_t dim = props.dimension;
+
+  /* How far are we with this chunk? */
+  long long* restrict temp_l = (long long*)temp;
+  const ptrdiff_t delta = (temp_l - props.start_temp_l) / dim;
+
+  for (int i = 0; i < N; i++)
+    props.convert_spart_l(e, sparts + delta + i, &temp_l[i * dim]);
+}
+
 /**
  * @brief Copy the particle data into a temporary buffer ready for i/o.
  *
@@ -575,6 +1007,18 @@ void io_copy_temp_buffer(void* temp, const struct engine* e,
                      io_convert_part_d_mapper, temp_d, N, copySize, 0,
                      (void*)&props);
 
+    } else if (props.convert_part_l != NULL) {
+
+      /* Prepare some parameters */
+      long long* temp_l = (long long*)temp;
+      props.start_temp_l = (long long*)temp;
+      props.e = e;
+
+      /* Copy the whole thing into a buffer */
+      threadpool_map((struct threadpool*)&e->threadpool,
+                     io_convert_part_l_mapper, temp_l, N, copySize, 0,
+                     (void*)&props);
+
     } else if (props.convert_gpart_f != NULL) {
 
       /* Prepare some parameters */
@@ -599,6 +1043,54 @@ void io_copy_temp_buffer(void* temp, const struct engine* e,
                      io_convert_gpart_d_mapper, temp_d, N, copySize, 0,
                      (void*)&props);
 
+    } else if (props.convert_gpart_l != NULL) {
+
+      /* Prepare some parameters */
+      long long* temp_l = (long long*)temp;
+      props.start_temp_l = (long long*)temp;
+      props.e = e;
+
+      /* Copy the whole thing into a buffer */
+      threadpool_map((struct threadpool*)&e->threadpool,
+                     io_convert_gpart_l_mapper, temp_l, N, copySize, 0,
+                     (void*)&props);
+
+    } else if (props.convert_spart_f != NULL) {
+
+      /* Prepare some parameters */
+      float* temp_f = (float*)temp;
+      props.start_temp_f = (float*)temp;
+      props.e = e;
+
+      /* Copy the whole thing into a buffer */
+      threadpool_map((struct threadpool*)&e->threadpool,
+                     io_convert_spart_f_mapper, temp_f, N, copySize, 0,
+                     (void*)&props);
+
+    } else if (props.convert_spart_d != NULL) {
+
+      /* Prepare some parameters */
+      double* temp_d = (double*)temp;
+      props.start_temp_d = (double*)temp;
+      props.e = e;
+
+      /* Copy the whole thing into a buffer */
+      threadpool_map((struct threadpool*)&e->threadpool,
+                     io_convert_spart_d_mapper, temp_d, N, copySize, 0,
+                     (void*)&props);
+
+    } else if (props.convert_spart_l != NULL) {
+
+      /* Prepare some parameters */
+      long long* temp_l = (long long*)temp;
+      props.start_temp_l = (long long*)temp;
+      props.e = e;
+
+      /* Copy the whole thing into a buffer */
+      threadpool_map((struct threadpool*)&e->threadpool,
+                     io_convert_spart_l_mapper, temp_l, N, copySize, 0,
+                     (void*)&props);
+
     } else {
       error("Missing conversion function");
     }
@@ -630,9 +1122,9 @@ void io_prepare_dm_gparts_mapper(void* restrict data, int Ndm, void* dummy) {
   /* Let's give all these gparts a negative id */
   for (int i = 0; i < Ndm; ++i) {
 
-    /* 0 or negative ids are not allowed */
-    if (gparts[i].id_or_neg_offset <= 0)
-      error("0 or negative ID for DM particle %i: ID=%lld", i,
+    /* Negative ids are not allowed */
+    if (gparts[i].id_or_neg_offset < 0)
+      error("Negative ID for DM particle %i: ID=%lld", i,
             gparts[i].id_or_neg_offset);
 
     /* Set gpart type */
@@ -747,7 +1239,7 @@ void io_duplicate_hydro_sparts_mapper(void* restrict data, int Nstars,
     gparts[i + Ndm].mass = sparts[i].mass;
 
     /* Set gpart type */
-    gparts[i + Ndm].type = swift_type_star;
+    gparts[i + Ndm].type = swift_type_stars;
 
     /* Link the particles */
     gparts[i + Ndm].id_or_neg_offset = -(long long)(offset + i);
@@ -768,9 +1260,10 @@ void io_duplicate_hydro_sparts_mapper(void* restrict data, int Nstars,
  * @param Nstars The number of stars particles read in.
  * @param Ndm The number of DM and gas particles read in.
  */
-void io_duplicate_star_gparts(struct threadpool* tp, struct spart* const sparts,
-                              struct gpart* const gparts, size_t Nstars,
-                              size_t Ndm) {
+void io_duplicate_stars_gparts(struct threadpool* tp,
+                               struct spart* const sparts,
+                               struct gpart* const gparts, size_t Nstars,
+                               size_t Ndm) {
 
   struct duplication_data data;
   data.gparts = gparts;
@@ -782,35 +1275,120 @@ void io_duplicate_star_gparts(struct threadpool* tp, struct spart* const sparts,
 }
 
 /**
- * @brief Copy every DM #gpart into the dmparts array.
+ * @brief Copy every non-inhibited #part into the parts_written array.
+ *
+ * @param parts The array of #part containing all particles.
+ * @param xparts The array of #xpart containing all particles.
+ * @param parts_written The array of #part to fill with particles we want to
+ * write.
+ * @param xparts_written The array of #xpart  to fill with particles we want to
+ * write.
+ * @param Nparts The total number of #part.
+ * @param Nparts_written The total number of #part to write.
+ */
+void io_collect_parts_to_write(const struct part* restrict parts,
+                               const struct xpart* restrict xparts,
+                               struct part* restrict parts_written,
+                               struct xpart* restrict xparts_written,
+                               const size_t Nparts,
+                               const size_t Nparts_written) {
+
+  size_t count = 0;
+
+  /* Loop over all parts */
+  for (size_t i = 0; i < Nparts; ++i) {
+
+    /* And collect the ones that have not been removed */
+    if (parts[i].time_bin != time_bin_inhibited &&
+        parts[i].time_bin != time_bin_not_created) {
+
+      parts_written[count] = parts[i];
+      xparts_written[count] = xparts[i];
+      count++;
+    }
+  }
+
+  /* Check that everything is fine */
+  if (count != Nparts_written)
+    error("Collected the wrong number of particles (%zu vs. %zu expected)",
+          count, Nparts_written);
+}
+
+/**
+ * @brief Copy every non-inhibited #spart into the sparts_written array.
+ *
+ * @param sparts The array of #spart containing all particles.
+ * @param sparts_written The array of #spart to fill with particles we want to
+ * write.
+ * @param Nsparts The total number of #part.
+ * @param Nsparts_written The total number of #part to write.
+ */
+void io_collect_sparts_to_write(const struct spart* restrict sparts,
+                                struct spart* restrict sparts_written,
+                                const size_t Nsparts,
+                                const size_t Nsparts_written) {
+
+  size_t count = 0;
+
+  /* Loop over all parts */
+  for (size_t i = 0; i < Nsparts; ++i) {
+
+    /* And collect the ones that have not been removed */
+    if (sparts[i].time_bin != time_bin_inhibited &&
+        sparts[i].time_bin != time_bin_not_created) {
+
+      sparts_written[count] = sparts[i];
+      count++;
+    }
+  }
+
+  /* Check that everything is fine */
+  if (count != Nsparts_written)
+    error("Collected the wrong number of s-particles (%zu vs. %zu expected)",
+          count, Nsparts_written);
+}
+
+/**
+ * @brief Copy every non-inhibited DM #gpart into the gparts_written array.
  *
  * @param gparts The array of #gpart containing all particles.
- * @param Ntot The number of #gpart.
- * @param dmparts The array of #gpart containg DM particles to be filled.
- * @param Ndm The number of DM particles.
+ * @param vr_data The array of gpart-related VELOCIraptor output.
+ * @param gparts_written The array of #gpart to fill with particles we want to
+ * write.
+ * @param vr_data_written The array of gpart-related VELOCIraptor with particles
+ * we want to write.
+ * @param Ngparts The total number of #part.
+ * @param Ngparts_written The total number of #part to write.
+ * @param with_stf Are we running with STF? i.e. do we want to collect vr data?
  */
-void io_collect_dm_gparts(const struct gpart* const gparts, size_t Ntot,
-                          struct gpart* const dmparts, size_t Ndm) {
+void io_collect_gparts_to_write(
+    const struct gpart* restrict gparts,
+    const struct velociraptor_gpart_data* restrict vr_data,
+    struct gpart* restrict gparts_written,
+    struct velociraptor_gpart_data* restrict vr_data_written,
+    const size_t Ngparts, const size_t Ngparts_written, const int with_stf) {
 
   size_t count = 0;
 
-  /* Loop over all gparts */
-  for (size_t i = 0; i < Ntot; ++i) {
+  /* Loop over all parts */
+  for (size_t i = 0; i < Ngparts; ++i) {
+
+    /* And collect the ones that have not been removed */
+    if ((gparts[i].time_bin != time_bin_inhibited) &&
+        (gparts[i].time_bin != time_bin_not_created) &&
+        (gparts[i].type == swift_type_dark_matter)) {
 
-    /* message("i=%zd count=%zd id=%lld part=%p", i, count, gparts[i].id,
-     * gparts[i].part); */
+      if (with_stf) vr_data_written[count] = vr_data[i];
 
-    /* And collect the DM ones */
-    if (gparts[i].type == swift_type_dark_matter) {
-      dmparts[count] = gparts[i];
+      gparts_written[count] = gparts[i];
       count++;
     }
   }
 
   /* Check that everything is fine */
-  if (count != Ndm)
-    error("Collected the wrong number of dm particles (%zu vs. %zu expected)",
-          count, Ndm);
+  if (count != Ngparts_written)
+    error("Collected the wrong number of g-particles (%zu vs. %zu expected)",
+          count, Ngparts_written);
 }
 
 /**
@@ -853,8 +1431,8 @@ void io_check_output_fields(const struct swift_params* params,
         darkmatter_write_particles(&gp, list, &num_fields);
         break;
 
-      case swift_type_star:
-        star_write_particles(&sp, list, &num_fields);
+      case swift_type_stars:
+        stars_write_particles(&sp, list, &num_fields);
         break;
 
       default:
@@ -939,8 +1517,8 @@ void io_write_output_field_parameter(const char* filename) {
         darkmatter_write_particles(NULL, list, &num_fields);
         break;
 
-      case swift_type_star:
-        star_write_particles(NULL, list, &num_fields);
+      case swift_type_stars:
+        stars_write_particles(NULL, list, &num_fields);
         break;
 
       default:
diff --git a/src/common_io.h b/src/common_io.h
index 152b40a8d7c931b3398f4f04d3a61e9cf7f1836c..eb1ee0a804f324d897842fb2a0ca33fc07e769d6 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -24,6 +24,7 @@
 #include "../config.h"
 
 /* Local includes. */
+#include "part_type.h"
 #include "units.h"
 
 #define FIELD_BUFFER_SIZE 200
@@ -32,9 +33,12 @@
 #define IO_BUFFER_ALIGNMENT 1024
 
 /* Avoid cyclic inclusion problems */
+struct cell;
 struct part;
 struct gpart;
+struct velociraptor_gpart_data;
 struct spart;
+struct xpart;
 struct io_props;
 struct engine;
 struct threadpool;
@@ -56,12 +60,6 @@ enum IO_DATA_TYPE {
   CHAR
 };
 
-/**
- * @brief The different formats for when to run structure finding.
- *
- */
-enum IO_STF_OUTPUT_FORMAT { STEPS = 0, TIME };
-
 #if defined(HAVE_HDF5)
 
 hid_t io_hdf5_type(enum IO_DATA_TYPE type);
@@ -70,7 +68,7 @@ void io_read_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type,
                        void* data);
 
 void io_write_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type,
-                        void* data, int num);
+                        const void* data, int num);
 
 void io_write_attribute_d(hid_t grp, const char* name, double data);
 void io_write_attribute_f(hid_t grp, const char* name, float data);
@@ -81,6 +79,14 @@ void io_write_attribute_s(hid_t grp, const char* name, const char* str);
 void io_write_code_description(hid_t h_file);
 void io_write_engine_policy(hid_t h_file, const struct engine* e);
 
+void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
+                           const struct cell* cells_top, const int nr_cells,
+                           const double width[3], const int nodeID,
+                           const long long global_counts[swift_type_count],
+                           const long long global_offsets[swift_type_count],
+                           const struct unit_system* internal_units,
+                           const struct unit_system* snapshot_units);
+
 void io_read_unit_system(hid_t h_file, struct unit_system* ic_units,
                          const struct unit_system* internal_units,
                          int mpi_rank);
@@ -97,16 +103,31 @@ void io_copy_temp_buffer(void* temp, const struct engine* e,
 size_t io_sizeof_type(enum IO_DATA_TYPE type);
 int io_is_double_precision(enum IO_DATA_TYPE type);
 
-void io_collect_dm_gparts(const struct gpart* const gparts, size_t Ntot,
-                          struct gpart* const dmparts, size_t Ndm);
+void io_collect_parts_to_write(const struct part* restrict parts,
+                               const struct xpart* restrict xparts,
+                               struct part* restrict parts_written,
+                               struct xpart* restrict xparts_written,
+                               const size_t Nparts,
+                               const size_t Nparts_written);
+void io_collect_sparts_to_write(const struct spart* restrict sparts,
+                                struct spart* restrict sparts_written,
+                                const size_t Nsparts,
+                                const size_t Nsparts_written);
+void io_collect_gparts_to_write(const struct gpart* restrict gparts,
+                                const struct velociraptor_gpart_data* vr_data,
+                                struct gpart* restrict gparts_written,
+                                struct velociraptor_gpart_data* vr_data_written,
+                                const size_t Ngparts,
+                                const size_t Ngparts_written, int with_stf);
 void io_prepare_dm_gparts(struct threadpool* tp, struct gpart* const gparts,
                           size_t Ndm);
 void io_duplicate_hydro_gparts(struct threadpool* tp, struct part* const parts,
                                struct gpart* const gparts, size_t Ngas,
                                size_t Ndm);
-void io_duplicate_star_gparts(struct threadpool* tp, struct spart* const sparts,
-                              struct gpart* const gparts, size_t Nstars,
-                              size_t Ndm);
+void io_duplicate_stars_gparts(struct threadpool* tp,
+                               struct spart* const sparts,
+                               struct gpart* const gparts, size_t Nstars,
+                               size_t Ndm);
 
 void io_check_output_fields(const struct swift_params* params,
                             const long long N_total[3]);
diff --git a/src/const.h b/src/const.h
index 6c5b5299c08efb7935b046ecfd0b3d67b7dc4c7a..613a48920e6f26c209faf6e354b82c2ed5be0bf1 100644
--- a/src/const.h
+++ b/src/const.h
@@ -21,13 +21,10 @@
 #define SWIFT_CONST_H
 
 /* SPH Viscosity constants. */
-#define const_viscosity_alpha 0.8f
-#define const_viscosity_alpha_min \
-  0.1f /* Values taken from (Price,2004), not used in legacy gadget mode */
-#define const_viscosity_alpha_max \
-  2.0f /* Values taken from (Price,2004), not used in legacy gadget mode */
-#define const_viscosity_length \
-  0.1f /* Values taken from (Price,2004), not used in legacy gadget mode */
+/* Cosmology default beta=3.0. Planetary default beta=4.0
+ * Alpha can be set in the parameter file.
+ * Beta is defined as in e.g. Price (2010) Eqn (103) */
+#define const_viscosity_beta 3.0f
 
 /* SPH Thermal conductivity constants. */
 #define const_conductivity_alpha \
@@ -36,6 +33,9 @@
 /* Time integration constants. */
 #define const_max_u_change 0.1f
 
+/* Time-step limiter maximal difference in signal velocity */
+#define const_limiter_max_v_sig_ratio 4.1f
+
 /* Type of gradients to use (GIZMO_SPH only) */
 /* If no option is chosen, no gradients are used (first order scheme) */
 //#define GRADIENTS_SPH
diff --git a/src/cooling.c b/src/cooling.c
index 154b859f74402d9e9a8adf1fb6c796b5195b8cd1..34205937bbd7ce144503b10ef047cf5b552f23cc 100644
--- a/src/cooling.c
+++ b/src/cooling.c
@@ -53,28 +53,3 @@ void cooling_print(const struct cooling_function_data* cooling) {
 
   cooling_print_backend(cooling);
 }
-
-/**
- * @brief Write a cooling struct to the given FILE as a stream of bytes.
- *
- * @param cooling the struct
- * @param stream the file stream
- */
-void cooling_struct_dump(const struct cooling_function_data* cooling,
-                         FILE* stream) {
-  restart_write_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
-                       stream, "cooling", "cooling function");
-}
-
-/**
- * @brief Restore a hydro_props struct from the given FILE as a stream of
- * bytes.
- *
- * @param cooling the struct
- * @param stream the file stream
- */
-void cooling_struct_restore(const struct cooling_function_data* cooling,
-                            FILE* stream) {
-  restart_read_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
-                      stream, NULL, "cooling function");
-}
diff --git a/src/cooling.h b/src/cooling.h
index 0fb04b9e484d989e746a254fc1934dc20033fb09..875ef5054491f783d526e7c8e2caf3e005c8a5a0 100644
--- a/src/cooling.h
+++ b/src/cooling.h
@@ -27,6 +27,12 @@
 /* Config parameters. */
 #include "../config.h"
 
+/* Local includes */
+#include "parser.h"
+#include "physical_constants.h"
+#include "restart.h"
+#include "units.h"
+
 /* Import the right cooling definition */
 #if defined(COOLING_NONE)
 #include "./cooling/none/cooling.h"
@@ -34,6 +40,8 @@
 #include "./cooling/const_du/cooling.h"
 #elif defined(COOLING_CONST_LAMBDA)
 #include "./cooling/const_lambda/cooling.h"
+#elif defined(COOLING_COMPTON)
+#include "./cooling/Compton/cooling.h"
 #elif defined(COOLING_GRACKLE)
 #include "./cooling/grackle/cooling.h"
 #elif defined(COOLING_EAGLE)
@@ -53,7 +61,7 @@ void cooling_print(const struct cooling_function_data* cooling);
 /* Dump/restore. */
 void cooling_struct_dump(const struct cooling_function_data* cooling,
                          FILE* stream);
-void cooling_struct_restore(const struct cooling_function_data* cooling,
-                            FILE* stream);
+void cooling_struct_restore(struct cooling_function_data* cooling, FILE* stream,
+                            const struct cosmology* cosmo);
 
 #endif /* SWIFT_COOLING_H */
diff --git a/src/cooling/Compton/cooling.h b/src/cooling/Compton/cooling.h
new file mode 100644
index 0000000000000000000000000000000000000000..c796375c33c586e2c9a95515f4124062a41640eb
--- /dev/null
+++ b/src/cooling/Compton/cooling.h
@@ -0,0 +1,419 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_COOLING_COMPTON_H
+#define SWIFT_COOLING_COMPTON_H
+
+/**
+ * @file src/cooling/Compton/cooling.h
+ * @brief Routines related to the "Compton" cooling function.
+ *
+ * This model computes the cooling rate from the Compton interaction with
+ * the CMB photons.
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <float.h>
+#include <math.h>
+
+/* Local includes. */
+#include "entropy_floor.h"
+#include "error.h"
+#include "hydro.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "units.h"
+
+/**
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift.
+ *
+ * @param cosmo The current cosmological model.
+ * @param cooling The #cooling_function_data used in the run.
+ */
+INLINE static void cooling_update(const struct cosmology* cosmo,
+                                  struct cooling_function_data* cooling) {
+  // Add content if required.
+}
+
+/**
+ * @brief Calculates du/dt in CGS units for a particle.
+ *
+ * @param cosmo The current cosmological model.
+ * @param phys_const The physical constants in internal units.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param cooling The #cooling_function_data used in the run.
+ * @param z The current redshift.
+ * @param u The current internal energy in internal units.
+ * @param p Pointer to the particle data.
+ * @return The change in energy per unit mass due to cooling for this particle
+ * in cgs units [erg * g^-1 * s^-1].
+ */
+__attribute__((always_inline)) INLINE static double Compton_cooling_rate_cgs(
+    const struct cosmology* cosmo, const struct phys_const* restrict phys_const,
+    const struct hydro_props* hydro_props,
+    const struct cooling_function_data* cooling, const double z, const double u,
+    const struct part* p) {
+
+  /* Get particle density */
+  const double rho = hydro_get_physical_density(p, cosmo);
+  const double rho_cgs = rho * cooling->conv_factor_density_to_cgs;
+
+  /* Powers of (1 + z) */
+  const double zp1 = z + 1.;
+  const double zp1p2 = zp1 * zp1;
+  const double zp1p4 = zp1p2 * zp1p2; /* (1 + z)^4 */
+
+  /* CMB temperature at this redshift */
+  const double T_CMB = cooling->const_T_CMB_0 * zp1;
+
+  /* Physical constants */
+  const double m_H = phys_const->const_proton_mass;
+  const double k_B = phys_const->const_boltzmann_k;
+
+  /* Gas properties */
+  const double T_transition = hydro_props->hydrogen_ionization_temperature;
+  const double mu_neutral = hydro_props->mu_neutral;
+  const double mu_ionised = hydro_props->mu_ionised;
+
+  /* Temperature over mean molecular weight */
+  const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B;
+
+  double T;
+
+  /* Are we above or below the HII -> HI transition? */
+  if (T_over_mu > (T_transition + 1.) / mu_ionised)
+    T = T_over_mu * mu_ionised;
+  else if (T_over_mu < (T_transition - 1.) / mu_neutral)
+    T = T_over_mu * mu_neutral;
+  else
+    T = T_transition;
+
+  /* Electron abundance */
+  double electron_abundance = 0.;  // MATTHIEU: To do: compute X_e
+
+  /* Temperature difference with the CMB */
+  const double delta_T = T - T_CMB;
+
+  /* Electron density */
+  const double electron_density_cgs =
+      rho_cgs * electron_abundance * cooling->proton_mass_cgs_inv;
+
+  /* Compton formula */
+  return cooling->const_Compton_rate_cgs * delta_T * zp1p4 *
+         electron_density_cgs / rho_cgs;
+}
+
+/**
+ * @brief Apply the cooling function to a particle.
+ *
+ * @param phys_const The physical constants in internal units.
+ * @param us The internal system of units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param floor_props Properties of the entropy floor.
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the particle' extended data.
+ * @param dt The time-step of this particle.
+ * @param dt_therm The time-step operator used for thermal quantities.
+ */
+__attribute__((always_inline)) INLINE static void cooling_cool_part(
+    const struct phys_const* restrict phys_const,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct hydro_props* hydro_props,
+    const struct entropy_floor_properties* floor_props,
+    const struct cooling_function_data* restrict cooling,
+    struct part* restrict p, struct xpart* restrict xp, const float dt,
+    const float dt_therm) {
+
+  /* Nothing to do here? */
+  if (dt == 0.) return;
+
+  /* Current energy */
+  const float u_old = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Current du_dt in physical coordinates (internal units) */
+  const float hydro_du_dt = hydro_get_physical_internal_energy_dt(p, cosmo);
+
+  /* Calculate cooling du_dt (in cgs units) */
+  const double cooling_du_dt_cgs = Compton_cooling_rate_cgs(
+      cosmo, phys_const, hydro_props, cooling, cosmo->z, u_old, p);
+
+  /* Convert to internal units */
+  float cooling_du_dt =
+      cooling_du_dt_cgs * cooling->conv_factor_energy_rate_from_cgs;
+
+  /* Add cosmological term */
+  cooling_du_dt *= cosmo->a * cosmo->a;
+
+  float total_du_dt = hydro_du_dt + cooling_du_dt;
+
+  /* We now need to check that we are not going to go below any of the limits */
+
+  /* Limit imposed by the entropy floor */
+  const float A_floor = entropy_floor(p, cosmo, floor_props);
+  const float rho = hydro_get_physical_density(p, cosmo);
+  const float u_floor = gas_internal_energy_from_entropy(rho, A_floor);
+
+  /* Absolute minimum */
+  const float u_minimal = hydro_props->minimal_internal_energy;
+
+  /* Largest of both limits */
+  const float u_limit = max(u_minimal, u_floor);
+
+  /* First, check whether we may end up below the minimal energy after
+   * this step 1/2 kick + another 1/2 kick that could potentially be for
+   * a time-step twice as big. We hence check for 1.5 delta_t. */
+  if (u_old + total_du_dt * 1.5 * dt_therm < u_limit) {
+    total_du_dt = (u_limit - u_old) / (1.5f * dt_therm);
+  }
+
+  /* Second, check whether the energy used in the prediction could get negative.
+   * We need to check for the 1/2 dt kick followed by a full time-step drift
+   * that could potentially be for a time-step twice as big. We hence check
+   * for 2.5 delta_t but this time against 0 energy not the minimum */
+  if (u_old + total_du_dt * 2.5 * dt_therm < 0.) {
+    total_du_dt = -u_old / ((2.5f + 0.0001f) * dt_therm);
+  }
+
+  /* Update the internal energy time derivative */
+  hydro_set_physical_internal_energy_dt(p, cosmo, total_du_dt);
+
+  /* Store the radiated energy (assuming dt will not change) */
+  xp->cooling_data.radiated_energy +=
+      -hydro_get_mass(p) * (total_du_dt - hydro_du_dt) * dt_therm;
+}
+
+/**
+ * @brief Computes the time-step due to cooling for this particle.
+ *
+ * We impose no time-step limit.
+ *
+ * @param cooling The #cooling_function_data used in the run.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended data of the particle.
+ */
+__attribute__((always_inline)) INLINE static float cooling_timestep(
+    const struct cooling_function_data* restrict cooling,
+    const struct phys_const* restrict phys_const,
+    const struct cosmology* restrict cosmo,
+    const struct unit_system* restrict us,
+    const struct hydro_props* hydro_props, const struct part* restrict p,
+    const struct xpart* restrict xp) {
+
+  return FLT_MAX;
+}
+
+/**
+ * @brief Sets the cooling properties of the (x-)particles to a valid start
+ * state.
+ *
+ * Nothing to do here. Just set the radiated energy counter to 0.
+ *
+ * @param phys_const The physical constants in internal units.
+ * @param cooling The properties of the cooling function.
+ * @param us The internal system of units.
+ * @param cosmo The current cosmological model.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void cooling_first_init_part(
+    const struct phys_const* restrict phys_const,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, struct xpart* restrict xp) {
+
+  xp->cooling_data.radiated_energy = 0.f;
+}
+
+/**
+ * @brief Compute the temperature of a #part based on the cooling function.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+INLINE static float cooling_get_temperature(
+    const struct phys_const* restrict phys_const,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, const struct xpart* restrict xp) {
+
+  /* Physical constants */
+  const double m_H = phys_const->const_proton_mass;
+  const double k_B = phys_const->const_boltzmann_k;
+
+  /* Gas properties */
+  const double T_transition = hydro_props->hydrogen_ionization_temperature;
+  const double mu_neutral = hydro_props->mu_neutral;
+  const double mu_ionised = hydro_props->mu_ionised;
+
+  /* Particle temperature */
+  const double u = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Temperature over mean molecular weight */
+  const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B;
+
+  /* Are we above or below the HII -> HI transition? */
+  if (T_over_mu > (T_transition + 1.) / mu_ionised)
+    return T_over_mu * mu_ionised;
+  else if (T_over_mu < (T_transition - 1.) / mu_neutral)
+    return T_over_mu * mu_neutral;
+  else
+    return T_transition;
+}
+
+/**
+ * @brief Returns the total radiated energy by this particle.
+ *
+ * @param xp The extended particle data
+ */
+__attribute__((always_inline)) INLINE static float cooling_get_radiated_energy(
+    const struct xpart* restrict xp) {
+
+  return xp->cooling_data.radiated_energy;
+}
+
+/**
+ * @brief Initialises the cooling properties.
+ *
+ * @param parameter_file The parsed parameter file.
+ * @param us The current internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cooling The cooling properties to initialize
+ */
+static INLINE void cooling_init_backend(struct swift_params* parameter_file,
+                                        const struct unit_system* us,
+                                        const struct phys_const* phys_const,
+                                        struct cooling_function_data* cooling) {
+
+  /* Some useful conversion values */
+  cooling->conv_factor_density_to_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_DENSITY);
+  cooling->conv_factor_energy_to_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+  cooling->conv_factor_energy_rate_from_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_TIME) /
+      units_cgs_conversion_factor(us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+
+  /* Useful constants */
+  cooling->proton_mass_cgs_inv =
+      1. / (phys_const->const_proton_mass *
+            units_cgs_conversion_factor(us, UNIT_CONV_MASS));
+
+  /* Temperature of the CMB in CGS */
+  const double T_CMB_0 = phys_const->const_T_CMB_0 *
+                         units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE);
+  cooling->const_T_CMB_0 = T_CMB_0; /* [K] */
+
+  /* Compute the coefficient at the front of the Compton cooling expression */
+  const double radiation_constant =
+      4. * phys_const->const_stefan_boltzmann / phys_const->const_speed_light_c;
+  const double compton_coefficient =
+      4. * radiation_constant * phys_const->const_thomson_cross_section *
+      phys_const->const_boltzmann_k /
+      (phys_const->const_electron_mass * phys_const->const_speed_light_c);
+  const float dimension_coefficient[5] = {1, 2, -3, 0, -5};
+
+  /* This should be ~1.0178085e-37 [g cm^2 s^-3 K^-5] */
+  const double compton_coefficient_cgs =
+      compton_coefficient *
+      units_general_cgs_conversion_factor(us, dimension_coefficient);
+
+  /* And now the Compton rate [g cm^2 s^-3 K^-1] == [erg s^-1 K^-1]*/
+  cooling->const_Compton_rate_cgs =
+      compton_coefficient_cgs * T_CMB_0 * T_CMB_0 * T_CMB_0 * T_CMB_0;
+}
+
+/**
+ * @brief Restore cooling tables (if applicable) after
+ * restart
+ *
+ * @param cooling the cooling_function_data structure
+ * @param cosmo cosmology structure
+ */
+static INLINE void cooling_restore_tables(struct cooling_function_data* cooling,
+                                          const struct cosmology* cosmo) {}
+
+/**
+ * @brief Prints the properties of the cooling model to stdout.
+ *
+ * @param cooling The properties of the cooling function.
+ */
+static INLINE void cooling_print_backend(
+    const struct cooling_function_data* cooling) {
+
+  message("Cooling function is 'Compton cooling'.");
+}
+
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * @param cooling the cooling data structure.
+ */
+static INLINE void cooling_clean(struct cooling_function_data* cooling) {}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * Nothing to do beyond writing the structure to the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+static INLINE void cooling_struct_dump(
+    const struct cooling_function_data* cooling, FILE* stream) {
+  restart_write_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                       stream, "cooling", "cooling function");
+}
+
+/**
+ * @brief Restore a cooling_function_data struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Nothing to do beyond reading the structure from the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+static INLINE void cooling_struct_restore(struct cooling_function_data* cooling,
+                                          FILE* stream,
+                                          const struct cosmology* cosmo) {
+  restart_read_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                      stream, NULL, "cooling function");
+}
+
+#endif /* SWIFT_COOLING_COMPTON_H */
diff --git a/src/cooling/Compton/cooling_io.h b/src/cooling/Compton/cooling_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..8fa3944ff78e7592da3978ee9465451c96e1d533
--- /dev/null
+++ b/src/cooling/Compton/cooling_io.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_COOLING_IO_COMPTON_H
+#define SWIFT_COOLING_IO_COMPTON_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "cooling.h"
+#include "io_properties.h"
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of cooling to the file
+ * @param h_grp The HDF5 group in which to write
+ * @param cooling the parameters of the cooling function.
+ */
+__attribute__((always_inline)) INLINE static void cooling_write_flavour(
+    hid_t h_grp, const struct cooling_function_data* cooling) {
+
+  io_write_attribute_s(h_grp, "Cooling Model", "Compton cooling");
+  io_write_attribute_d(h_grp, "Compton rate [erg s^-1 K^-1]",
+                       cooling->const_Compton_rate_cgs);
+}
+#endif
+
+INLINE static void convert_part_T(const struct engine* e, const struct part* p,
+                                  const struct xpart* xp, float* ret) {
+
+  ret[0] = cooling_get_temperature(e->physical_constants, e->hydro_properties,
+                                   e->internal_units, e->cosmology,
+                                   e->cooling_func, p, xp);
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended particle array.
+ * @param list The list of i/o properties to write.
+ * @param cooling The #cooling_function_data
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int cooling_write_particles(
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
+    const struct cooling_function_data* cooling) {
+
+  list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1,
+                                              UNIT_CONV_TEMPERATURE, parts,
+                                              xparts, convert_part_T);
+
+  return 1;
+}
+
+#endif /* SWIFT_COOLING_IO_COMPTON_H */
diff --git a/src/cooling/Compton/cooling_struct.h b/src/cooling/Compton/cooling_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e09d492b1c8f6eb92f9d4f5faa00998dc2daec9
--- /dev/null
+++ b/src/cooling/Compton/cooling_struct.h
@@ -0,0 +1,56 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_COOLING_STRUCT_COMPTON_H
+#define SWIFT_COOLING_STRUCT_COMPTON_H
+
+/**
+ * @brief Properties of the cooling function.
+ */
+struct cooling_function_data {
+
+  /*! Compton rate in cgs [g cm^2 s^-3 K^-1] */
+  double const_Compton_rate_cgs;
+
+  /*! Temperature of the CMB at redshift 0 in cgs [K] */
+  double const_T_CMB_0;
+
+  /*! Conversion factor from internal units to cgs for density */
+  double conv_factor_density_to_cgs;
+
+  /*! Conversion factor from internal units to cgs for internal energy */
+  double conv_factor_energy_to_cgs;
+
+  /*! Conversion factor from cgs to internal units for internal energy
+   * derivative */
+  double conv_factor_energy_rate_from_cgs;
+
+  /*! Inverse of the proton mass in cgs units [g^-1] */
+  double proton_mass_cgs_inv;
+};
+
+/**
+ * @brief Properties of the cooling stored in the particle data.
+ */
+struct cooling_xpart_data {
+
+  /*! Energy radiated away by this particle since the start of the run */
+  float radiated_energy;
+};
+
+#endif /* SWIFT_COOLING_STRUCT_COMPTON_H */
diff --git a/src/cooling/EAGLE/cooling.c b/src/cooling/EAGLE/cooling.c
new file mode 100644
index 0000000000000000000000000000000000000000..87c12e73d970b4d1caa6ce33c20666556463e08c
--- /dev/null
+++ b/src/cooling/EAGLE/cooling.c
@@ -0,0 +1,973 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+/**
+ * @file src/cooling/EAGLE/cooling.c
+ * @brief EAGLE cooling functions
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <float.h>
+#include <hdf5.h>
+#include <math.h>
+#include <time.h>
+
+/* Local includes. */
+#include "chemistry.h"
+#include "cooling.h"
+#include "cooling_rates.h"
+#include "cooling_struct.h"
+#include "cooling_tables.h"
+#include "entropy_floor.h"
+#include "error.h"
+#include "exp10.h"
+#include "hydro.h"
+#include "interpolate.h"
+#include "io_properties.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "units.h"
+
+/* Maximum number of iterations for newton
+ * and bisection integration schemes */
+static const int newton_max_iterations = 15;
+static const int bisection_max_iterations = 150;
+
+/* Tolerances for termination criteria. */
+static const float explicit_tolerance = 0.05;
+static const float newton_tolerance = 1.0e-4;
+static const float bisection_tolerance = 1.0e-6;
+static const float rounding_tolerance = 1.0e-4;
+static const double bracket_factor = 1.5;              /* sqrt(1.1) */
+static const double newton_log_u_guess_cgs = 12.30103; /* log10(2e12) */
+
+/**
+ * @brief Find the index of the current redshift along the redshift dimension
+ * of the cooling tables.
+ *
+ * Since the redshift table is not evenly spaced, compare z with each
+ * table value in decreasing order starting with the previous redshift index
+ *
+ * The returned difference is expressed in units of the table separation. This
+ * means dx = (x - table[i]) / (table[i+1] - table[i]). It is always between
+ * 0 and 1.
+ *
+ * @param z Redshift we are searching for.
+ * @param z_index (return) Index of the redshift in the table.
+ * @param dz (return) Difference in redshift between z and table[z_index].
+ * @param cooling #cooling_function_data structure containing redshift table.
+ */
+__attribute__((always_inline)) INLINE void get_redshift_index(
+    const float z, int *z_index, float *dz,
+    struct cooling_function_data *restrict cooling) {
+
+  /* Before the earliest redshift or before hydrogen reionization, flag for
+   * collisional cooling */
+  if (z > cooling->H_reion_z) {
+    *z_index = eagle_cooling_N_redshifts;
+    *dz = 0.0;
+  }
+
+  /* From reionization use the cooling tables */
+  else if (z > cooling->Redshifts[eagle_cooling_N_redshifts - 1] &&
+           z <= cooling->H_reion_z) {
+    *z_index = eagle_cooling_N_redshifts + 1;
+    *dz = 0.0;
+  }
+
+  /* At the end, just use the last value */
+  else if (z <= cooling->Redshifts[0]) {
+    *z_index = 0;
+    *dz = 0.0;
+  }
+
+  /* Normal case: search... */
+  else {
+
+    /* start at the previous index and search */
+    for (int i = cooling->previous_z_index; i >= 0; i--) {
+      if (z > cooling->Redshifts[i]) {
+
+        *z_index = i;
+        cooling->previous_z_index = i;
+
+        *dz = (z - cooling->Redshifts[i]) /
+              (cooling->Redshifts[i + 1] - cooling->Redshifts[i]);
+        break;
+      }
+    }
+  }
+}
+
+/**
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift. Predominantly used to read cooling tables
+ * above and below the current redshift, if not already read in.
+ *
+ * @param cosmo The current cosmological model.
+ * @param cooling The #cooling_function_data used in the run.
+ */
+void cooling_update(const struct cosmology *cosmo,
+                    struct cooling_function_data *cooling) {
+
+  /* Current redshift */
+  const float redshift = cosmo->z;
+
+  /* What is the current table index along the redshift axis? */
+  int z_index = -1;
+  float dz = 0.f;
+  get_redshift_index(redshift, &z_index, &dz, cooling);
+  cooling->dz = dz;
+
+  /* Do we already have the correct tables loaded? */
+  if (cooling->z_index == z_index) return;
+
+  /* Which table should we load ? */
+  if (z_index >= eagle_cooling_N_redshifts) {
+
+    if (z_index == eagle_cooling_N_redshifts + 1) {
+
+      /* Bewtween re-ionization and first table */
+      get_redshift_invariant_table(cooling, /* photodis=*/0);
+
+    } else {
+
+      /* Above re-ionization */
+      get_redshift_invariant_table(cooling, /* photodis=*/1);
+    }
+
+  } else {
+
+    /* Normal case: two tables bracketing the current z */
+    const int low_z_index = z_index;
+    const int high_z_index = z_index + 1;
+
+    get_cooling_table(cooling, low_z_index, high_z_index);
+  }
+
+  /* Store the currently loaded index */
+  cooling->z_index = z_index;
+}
+
+/**
+ * @brief Newton Raphson integration scheme to calculate particle cooling over
+ * timestep. This replaces bisection scheme used in EAGLE to minimize the
+ * number of array accesses. Integration defaults to bisection scheme (see
+ * function bisection_iter) if this function does not converge within a
+ * specified number of steps
+ *
+ * @param logu_init Initial guess for log(internal energy)
+ * @param u_ini Internal energy at beginning of hydro step
+ * @param n_H_index Particle hydrogen number density index
+ * @param d_n_H Particle hydrogen number density offset
+ * @param He_index Particle helium fraction index
+ * @param d_He Particle helium fraction offset
+ * @param He_reion_heat Heating due to helium reionization
+ * (only depends on redshift, so passed as parameter)
+ * @param p #part structure
+ * @param cosmo #cosmology structure
+ * @param cooling #cooling_function_data structure
+ * @param phys_const #phys_const data structure
+ * @param abundance_ratio Array of ratios of metal abundance to solar
+ * @param dt timestep
+ * @param bisection_flag Flag to identify if scheme failed to converge
+ */
+INLINE static float newton_iter(
+    float logu_init, double u_ini, int n_H_index, float d_n_H, int He_index,
+    float d_He, float He_reion_heat, struct part *restrict p,
+    const struct cosmology *restrict cosmo,
+    const struct cooling_function_data *restrict cooling,
+    const struct phys_const *restrict phys_const,
+    const float abundance_ratio[chemistry_element_count + 2], float dt,
+    int *bisection_flag) {
+
+  double logu, logu_old;
+  double dLambdaNet_du = 0.0, LambdaNet;
+
+  /* table bounds */
+  const float log_table_bound_high =
+      (cooling->Therm[eagle_cooling_N_temperature - 1] - 0.05) / M_LOG10E;
+  const float log_table_bound_low = (cooling->Therm[0] + 0.05) / M_LOG10E;
+
+  /* convert Hydrogen mass fraction in Hydrogen number density */
+  const float XH = p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+  const double n_H =
+      hydro_get_physical_density(p, cosmo) * XH / phys_const->const_proton_mass;
+  const double n_H_cgs = n_H * cooling->number_density_to_cgs;
+
+  /* compute ratefact = n_H * n_H / rho; Might lead to round-off error:
+   * replaced by equivalent expression below */
+  const double ratefact_cgs = n_H_cgs * XH * cooling->inv_proton_mass_cgs;
+
+  logu_old = logu_init;
+  logu = logu_old;
+  int i = 0;
+
+  float LambdaNet_old = 0;
+  LambdaNet = 0;
+  do /* iterate to convergence */
+  {
+    logu_old = logu;
+    LambdaNet_old = LambdaNet;
+    LambdaNet = (He_reion_heat / (dt * ratefact_cgs)) +
+                eagle_cooling_rate(logu_old, cosmo->z, n_H_cgs, abundance_ratio,
+                                   n_H_index, d_n_H, He_index, d_He, cooling,
+                                   &dLambdaNet_du);
+
+    /* Newton iteration. For details on how the cooling equation is integrated
+     * see documentation in theory/Cooling/ */
+    logu = logu_old - (1.0 - u_ini * exp(-logu_old) -
+                       LambdaNet * ratefact_cgs * dt * exp(-logu_old)) /
+                          (1.0 - dLambdaNet_du * ratefact_cgs * dt);
+    /* Check if first step passes over equilibrium solution, if it does adjust
+     * next guess */
+    if (i == 1 && LambdaNet_old * LambdaNet < 0) logu = newton_log_u_guess_cgs;
+
+    /* check whether iterations go within about 10% of the table bounds,
+     * if they do default to bisection method */
+    if (logu > log_table_bound_high) {
+      i = newton_max_iterations;
+      break;
+    } else if (logu < log_table_bound_low) {
+      i = newton_max_iterations;
+      break;
+    }
+
+    i++;
+  } while (fabs(logu - logu_old) > newton_tolerance &&
+           i < newton_max_iterations);
+  if (i >= newton_max_iterations) {
+    /* flag to trigger bisection scheme */
+    *bisection_flag = 1;
+  }
+
+  return logu;
+}
+
+/**
+ * @brief Bisection integration scheme
+ *
+ * @param u_ini_cgs Internal energy at beginning of hydro step in CGS.
+ * @param n_H_cgs Hydrogen number density in CGS.
+ * @param redshift Current redshift.
+ * @param n_H_index Particle hydrogen number density index.
+ * @param d_n_H Particle hydrogen number density offset.
+ * @param He_index Particle helium fraction index.
+ * @param d_He Particle helium fraction offset.
+ * @param Lambda_He_reion_cgs Cooling rate coming from He reionization.
+ * @param ratefact_cgs Multiplication factor to get a cooling rate.
+ * @param cooling #cooling_function_data structure.
+ * @param abundance_ratio Array of ratios of metal abundance to solar.
+ * @param dt_cgs timestep in CGS.
+ * @param ID ID of the particle (for debugging).
+ */
+INLINE static double bisection_iter(
+    const double u_ini_cgs, const double n_H_cgs, const double redshift,
+    int n_H_index, float d_n_H, int He_index, float d_He,
+    double Lambda_He_reion_cgs, double ratefact_cgs,
+    const struct cooling_function_data *restrict cooling,
+    const float abundance_ratio[chemistry_element_count + 2], double dt_cgs,
+    long long ID) {
+
+  /* Bracketing */
+  double u_lower_cgs = u_ini_cgs;
+  double u_upper_cgs = u_ini_cgs;
+
+  /*************************************/
+  /* Let's get a first guess           */
+  /*************************************/
+
+  double LambdaNet_cgs =
+      Lambda_He_reion_cgs +
+      eagle_cooling_rate(log(u_ini_cgs), redshift, n_H_cgs, abundance_ratio,
+                         n_H_index, d_n_H, He_index, d_He, cooling,
+                         /*dLambdaNet_du=*/NULL);
+
+  /*************************************/
+  /* Let's try to bracket the solution */
+  /*************************************/
+
+  if (LambdaNet_cgs < 0) {
+
+    /* we're cooling! */
+    u_lower_cgs /= bracket_factor;
+    u_upper_cgs *= bracket_factor;
+
+    /* Compute a new rate */
+    LambdaNet_cgs =
+        Lambda_He_reion_cgs +
+        eagle_cooling_rate(log(u_lower_cgs), redshift, n_H_cgs, abundance_ratio,
+                           n_H_index, d_n_H, He_index, d_He, cooling,
+                           /*dLambdaNet_du=*/NULL);
+
+    int i = 0;
+    while (u_lower_cgs - u_ini_cgs - LambdaNet_cgs * ratefact_cgs * dt_cgs >
+               0 &&
+           i < bisection_max_iterations) {
+
+      u_lower_cgs /= bracket_factor;
+      u_upper_cgs /= bracket_factor;
+
+      /* Compute a new rate */
+      LambdaNet_cgs = Lambda_He_reion_cgs +
+                      eagle_cooling_rate(log(u_lower_cgs), redshift, n_H_cgs,
+                                         abundance_ratio, n_H_index, d_n_H,
+                                         He_index, d_He, cooling,
+                                         /*dLambdaNet_du=*/NULL);
+      i++;
+    }
+
+    if (i >= bisection_max_iterations) {
+      error(
+          "particle %llu exceeded max iterations searching for bounds when "
+          "cooling",
+          ID);
+    }
+  } else {
+
+    /* we are heating! */
+    u_lower_cgs /= bracket_factor;
+    u_upper_cgs *= bracket_factor;
+
+    /* Compute a new rate */
+    LambdaNet_cgs =
+        Lambda_He_reion_cgs +
+        eagle_cooling_rate(log(u_upper_cgs), redshift, n_H_cgs, abundance_ratio,
+                           n_H_index, d_n_H, He_index, d_He, cooling,
+                           /*dLambdaNet_du=*/NULL);
+
+    int i = 0;
+    while (u_upper_cgs - u_ini_cgs - LambdaNet_cgs * ratefact_cgs * dt_cgs <
+               0 &&
+           i < bisection_max_iterations) {
+
+      u_lower_cgs *= bracket_factor;
+      u_upper_cgs *= bracket_factor;
+
+      /* Compute a new rate */
+      LambdaNet_cgs = Lambda_He_reion_cgs +
+                      eagle_cooling_rate(log(u_upper_cgs), redshift, n_H_cgs,
+                                         abundance_ratio, n_H_index, d_n_H,
+                                         He_index, d_He, cooling,
+                                         /*dLambdaNet_du=*/NULL);
+      i++;
+    }
+
+    if (i >= bisection_max_iterations) {
+      error(
+          "particle %llu exceeded max iterations searching for bounds when "
+          "heating",
+          ID);
+    }
+  }
+
+  /********************************************/
+  /* We now have an upper and lower bound.    */
+  /* Let's iterate by reducing the bracketing */
+  /********************************************/
+
+  /* bisection iteration */
+  int i = 0;
+  double u_next_cgs;
+
+  do {
+
+    /* New guess */
+    u_next_cgs = 0.5 * (u_lower_cgs + u_upper_cgs);
+
+    /* New rate */
+    LambdaNet_cgs =
+        Lambda_He_reion_cgs +
+        eagle_cooling_rate(log(u_next_cgs), redshift, n_H_cgs, abundance_ratio,
+                           n_H_index, d_n_H, He_index, d_He, cooling,
+                           /*dLambdaNet_du=*/NULL);
+
+    /* Where do we go next? */
+    if (u_next_cgs - u_ini_cgs - LambdaNet_cgs * ratefact_cgs * dt_cgs > 0.0) {
+      u_upper_cgs = u_next_cgs;
+    } else {
+      u_lower_cgs = u_next_cgs;
+    }
+
+    i++;
+  } while (fabs(u_upper_cgs - u_lower_cgs) / u_next_cgs > bisection_tolerance &&
+           i < bisection_max_iterations);
+
+  if (i >= bisection_max_iterations)
+    error("Particle id %llu failed to converge", ID);
+
+  return u_upper_cgs;
+}
+
+/**
+ * @brief Apply the cooling function to a particle.
+ *
+ * We want to compute u_new such that u_new = u_old + dt * du/dt(u_new, X),
+ * where X stands for the metallicity, density and redshift. These are
+ * kept constant.
+ *
+ * We first compute du/dt(u_old). If dt * du/dt(u_old) is small enough, we
+ * use an explicit integration and use this as our solution.
+ *
+ * Otherwise, we try to find a solution to the implicit time-integration
+ * problem. This leads to the root-finding problem:
+ *
+ * f(u_new) = u_new - u_old - dt * du/dt(u_new, X) = 0
+ *
+ * We first try a few Newton-Raphson iteration if it does not converge, we
+ * revert to a bisection scheme.
+ *
+ * This is done by first bracketing the solution and then iterating
+ * towards the solution by reducing the window down to a certain tolerance.
+ * Note there is always at least one solution since
+ * f(+inf) is < 0 and f(-inf) is > 0.
+ *
+ * @param phys_const The physical constants in internal units.
+ * @param us The internal system of units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_properties the hydro_props struct
+ * @param floor_props Properties of the entropy floor.
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
+ * @param dt The cooling time-step of this particle.
+ * @param dt_therm The hydro time-step of this particle.
+ */
+void cooling_cool_part(const struct phys_const *phys_const,
+                       const struct unit_system *us,
+                       const struct cosmology *cosmo,
+                       const struct hydro_props *hydro_properties,
+                       const struct entropy_floor_properties *floor_props,
+                       const struct cooling_function_data *cooling,
+                       struct part *restrict p, struct xpart *restrict xp,
+                       const float dt, const float dt_therm) {
+
+  /* No cooling happens over zero time */
+  if (dt == 0.) return;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (cooling->Redshifts == NULL)
+    error(
+        "Cooling function has not been initialised. Did you forget the "
+        "--cooling runtime flag?");
+#endif
+
+  /* Get internal energy at the last kick step */
+  const float u_start = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Get the change in internal energy due to hydro forces */
+  const float hydro_du_dt = hydro_get_physical_internal_energy_dt(p, cosmo);
+
+  /* Get internal energy at the end of the next kick step (assuming dt does not
+   * increase) */
+  double u_0 = (u_start + hydro_du_dt * dt_therm);
+
+  /* Check for minimal energy */
+  u_0 = max(u_0, hydro_properties->minimal_internal_energy);
+
+  /* Convert to CGS units */
+  const double u_start_cgs = u_start * cooling->internal_energy_to_cgs;
+  const double u_0_cgs = u_0 * cooling->internal_energy_to_cgs;
+  const double dt_cgs = dt * units_cgs_conversion_factor(us, UNIT_CONV_TIME);
+
+  /* Change in redshift over the course of this time-step
+     (See cosmology theory document for the derivation) */
+  const double delta_redshift = -dt * cosmo->H * cosmo->a_inv;
+
+  /* Get this particle's abundance ratios compared to solar
+   * Note that we need to add S and Ca that are in the tables but not tracked
+   * by the particles themselves.
+   * The order is [H, He, C, N, O, Ne, Mg, Si, S, Ca, Fe] */
+  float abundance_ratio[chemistry_element_count + 2];
+  abundance_ratio_to_solar(p, cooling, abundance_ratio);
+
+  /* Get the Hydrogen and Helium mass fractions */
+  const float XH = p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+  const float XHe = p->chemistry_data.metal_mass_fraction[chemistry_element_He];
+
+  /* Get the Helium mass fraction. Note that this is He / (H + He), i.e. a
+   * metal-free Helium mass fraction as per the Wiersma+08 definition */
+  const float HeFrac = XHe / (XH + XHe);
+
+  /* convert Hydrogen mass fraction into Hydrogen number density */
+  const double n_H =
+      hydro_get_physical_density(p, cosmo) * XH / phys_const->const_proton_mass;
+  const double n_H_cgs = n_H * cooling->number_density_to_cgs;
+
+  /* ratefact = n_H * n_H / rho; Might lead to round-off error: replaced by
+   * equivalent expression  below */
+  const double ratefact_cgs = n_H_cgs * (XH * cooling->inv_proton_mass_cgs);
+
+  /* compute hydrogen number density and helium fraction table indices and
+   * offsets (These are fixed for any value of u, so no need to recompute them)
+   */
+  int He_index, n_H_index;
+  float d_He, d_n_H;
+  get_index_1d(cooling->HeFrac, eagle_cooling_N_He_frac, HeFrac, &He_index,
+               &d_He);
+  get_index_1d(cooling->nH, eagle_cooling_N_density, log10(n_H_cgs), &n_H_index,
+               &d_n_H);
+
+  /* Start by computing the cooling (heating actually) rate from Helium
+     re-ionization as this needs to be added on no matter what */
+
+  /* Get helium and hydrogen reheating term */
+  const double Helium_reion_heat_cgs = eagle_helium_reionization_extraheat(
+      cooling->z_index, delta_redshift, cooling);
+
+  /* Convert this into a rate */
+  const double Lambda_He_reion_cgs =
+      Helium_reion_heat_cgs / (dt_cgs * ratefact_cgs);
+
+  /* Let's compute the internal energy at the end of the step */
+  double u_final_cgs;
+
+  /* First try an explicit integration (note we ignore the derivative) */
+  const double LambdaNet_cgs =
+      Lambda_He_reion_cgs + eagle_cooling_rate(log(u_0_cgs), cosmo->z, n_H_cgs,
+                                               abundance_ratio, n_H_index,
+                                               d_n_H, He_index, d_He, cooling,
+                                               /*dLambdaNet_du=*/NULL);
+
+  /* if cooling rate is small, take the explicit solution */
+  if (fabs(ratefact_cgs * LambdaNet_cgs * dt_cgs) <
+      explicit_tolerance * u_0_cgs) {
+
+    u_final_cgs = u_0_cgs + ratefact_cgs * LambdaNet_cgs * dt_cgs;
+
+  } else {
+
+    int bisection_flag = 1;
+
+    // MATTHIEU: TO DO restore the Newton-Raphson scheme
+    if (0 && cooling->newton_flag) {
+
+      /* Ok, try a Newton-Raphson scheme instead */
+      double log_u_final_cgs =
+          newton_iter(log(u_0_cgs), u_0_cgs, n_H_index, d_n_H, He_index, d_He,
+                      Lambda_He_reion_cgs, p, cosmo, cooling, phys_const,
+                      abundance_ratio, dt_cgs, &bisection_flag);
+
+      /* Check if newton scheme sent us to a higher energy despite being in
+         a  cooling regime If it did try newton scheme with a better guess.
+         (Guess internal energy near equilibrium solution).  */
+      if (LambdaNet_cgs < 0 && log_u_final_cgs > log(u_0_cgs)) {
+        bisection_flag = 0;
+        log_u_final_cgs =
+            newton_iter(newton_log_u_guess_cgs, u_0_cgs, n_H_index, d_n_H,
+                        He_index, d_He, Lambda_He_reion_cgs, p, cosmo, cooling,
+                        phys_const, abundance_ratio, dt_cgs, &bisection_flag);
+      }
+
+      u_final_cgs = exp(log_u_final_cgs);
+    }
+
+    /* Alright, all else failed, let's bisect */
+    if (bisection_flag || !(cooling->newton_flag)) {
+      u_final_cgs =
+          bisection_iter(u_0_cgs, n_H_cgs, cosmo->z, n_H_index, d_n_H, He_index,
+                         d_He, Lambda_He_reion_cgs, ratefact_cgs, cooling,
+                         abundance_ratio, dt_cgs, p->id);
+    }
+  }
+
+  /* Expected change in energy over the next kick step
+     (assuming no change in dt) */
+  const double delta_u_cgs = u_final_cgs - u_start_cgs;
+
+  /* Convert back to internal units */
+  double delta_u = delta_u_cgs * cooling->internal_energy_from_cgs;
+
+  /* We now need to check that we are not going to go below any of the limits */
+
+  /* Limit imposed by the entropy floor */
+  const double A_floor = entropy_floor(p, cosmo, floor_props);
+  const double rho = hydro_get_physical_density(p, cosmo);
+  const double u_floor = gas_internal_energy_from_entropy(rho, A_floor);
+
+  /* Absolute minimum */
+  const double u_minimal = hydro_properties->minimal_internal_energy;
+
+  /* Largest of both limits */
+  const double u_limit = max(u_minimal, u_floor);
+
+  /* First, check whether we may end up below the minimal energy after
+   * this step 1/2 kick + another 1/2 kick that could potentially be for
+   * a time-step twice as big. We hence check for 1.5 delta_u. */
+  if (u_start + 1.5 * delta_u < u_limit) {
+    delta_u = (u_limit - u_start) / 1.5;
+  }
+
+  /* Second, check whether the energy used in the prediction could get negative.
+   * We need to check for the 1/2 dt kick followed by a full time-step drift
+   * that could potentially be for a time-step twice as big. We hence check
+   * for 2.5 delta_u but this time against 0 energy not the minimum.
+   * To avoid numerical rounding bringing us below 0., we add a tiny tolerance.
+   */
+  if (u_start + 2.5 * delta_u < 0.) {
+    delta_u = -u_start / (2.5 + rounding_tolerance);
+  }
+
+  /* Turn this into a rate of change (including cosmology term) */
+  const float cooling_du_dt = delta_u / dt_therm;
+
+  /* Update the internal energy time derivative */
+  hydro_set_physical_internal_energy_dt(p, cosmo, cooling_du_dt);
+
+  /* Store the radiated energy */
+  xp->cooling_data.radiated_energy -= hydro_get_mass(p) * cooling_du_dt * dt;
+}
+
+/**
+ * @brief Computes the cooling time-step.
+ *
+ * The time-step is not set by the properties of cooling.
+ *
+ * @param cooling The #cooling_function_data used in the run.
+ * @param phys_const #phys_const data struct.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology struct.
+ * @param hydro_props the properties of the hydro scheme.
+ * @param p #part data.
+ * @param xp extended particle data.
+ */
+__attribute__((always_inline)) INLINE float cooling_timestep(
+    const struct cooling_function_data *restrict cooling,
+    const struct phys_const *restrict phys_const,
+    const struct cosmology *restrict cosmo,
+    const struct unit_system *restrict us,
+    const struct hydro_props *hydro_props, const struct part *restrict p,
+    const struct xpart *restrict xp) {
+
+  return FLT_MAX;
+}
+
+/**
+ * @brief Sets the cooling properties of the (x-)particles to a valid start
+ * state.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+__attribute__((always_inline)) INLINE void cooling_first_init_part(
+    const struct phys_const *restrict phys_const,
+    const struct unit_system *restrict us,
+    const struct cosmology *restrict cosmo,
+    const struct cooling_function_data *restrict cooling,
+    const struct part *restrict p, struct xpart *restrict xp) {
+
+  xp->cooling_data.radiated_energy = 0.f;
+}
+
+/**
+ * @brief Compute the temperature of a #part based on the cooling function.
+ *
+ * We use the Temperature table of the Wiersma+08 set. This computes the
+ * equilibirum temperature of a gas for a given redshift, Hydrogen density,
+ * internal energy per unit mass and Helium fraction.
+ *
+ * The temperature returned is consistent with the cooling rates.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+float cooling_get_temperature(
+    const struct phys_const *restrict phys_const,
+    const struct hydro_props *restrict hydro_props,
+    const struct unit_system *restrict us,
+    const struct cosmology *restrict cosmo,
+    const struct cooling_function_data *restrict cooling,
+    const struct part *restrict p, const struct xpart *restrict xp) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (cooling->Redshifts == NULL)
+    error(
+        "Cooling function has not been initialised. Did you forget the "
+        "--temperature runtime flag?");
+#endif
+
+  /* Get physical internal energy */
+  const float u = hydro_get_physical_internal_energy(p, xp, cosmo);
+  const double u_cgs = u * cooling->internal_energy_to_cgs;
+
+  /* Get the Hydrogen and Helium mass fractions */
+  const float XH = p->chemistry_data.metal_mass_fraction[chemistry_element_H];
+  const float XHe = p->chemistry_data.metal_mass_fraction[chemistry_element_He];
+
+  /* Get the Helium mass fraction. Note that this is He / (H + He), i.e. a
+   * metal-free Helium mass fraction as per the Wiersma+08 definition */
+  const float HeFrac = XHe / (XH + XHe);
+
+  /* Convert Hydrogen mass fraction into Hydrogen number density */
+  const float rho = hydro_get_physical_density(p, cosmo);
+  const double n_H = rho * XH / phys_const->const_proton_mass;
+  const double n_H_cgs = n_H * cooling->number_density_to_cgs;
+
+  /* compute hydrogen number density and helium fraction table indices and
+   * offsets */
+  int He_index, n_H_index;
+  float d_He, d_n_H;
+  get_index_1d(cooling->HeFrac, eagle_cooling_N_He_frac, HeFrac, &He_index,
+               &d_He);
+  get_index_1d(cooling->nH, eagle_cooling_N_density, log10(n_H_cgs), &n_H_index,
+               &d_n_H);
+
+  /* Compute the log10 of the temperature by interpolating the table */
+  const double log_10_T = eagle_convert_u_to_temp(
+      log10(u_cgs), cosmo->z, /*compute_dT_du=*/0, /*dT_du=*/NULL, n_H_index,
+      He_index, d_n_H, d_He, cooling);
+
+  /* Undo the log! */
+  return exp10(log_10_T);
+}
+
+/**
+ * @brief Returns the total radiated energy by this particle.
+ *
+ * @param xp #xpart data struct
+ */
+__attribute__((always_inline)) INLINE float cooling_get_radiated_energy(
+    const struct xpart *restrict xp) {
+
+  return xp->cooling_data.radiated_energy;
+}
+
+/**
+ * @brief Initialises properties stored in the cooling_function_data struct
+ *
+ * @param parameter_file The parsed parameter file
+ * @param us Internal system of units data structure
+ * @param phys_const #phys_const data structure
+ * @param cooling #cooling_function_data struct to initialize
+ */
+void cooling_init_backend(struct swift_params *parameter_file,
+                          const struct unit_system *us,
+                          const struct phys_const *phys_const,
+                          struct cooling_function_data *cooling) {
+
+  /* read some parameters */
+  parser_get_param_string(parameter_file, "EAGLECooling:dir_name",
+                          cooling->cooling_table_path);
+  cooling->H_reion_z =
+      parser_get_param_float(parameter_file, "EAGLECooling:H_reion_z");
+  cooling->He_reion_z_centre =
+      parser_get_param_float(parameter_file, "EAGLECooling:He_reion_z_centre");
+  cooling->He_reion_z_sigma =
+      parser_get_param_float(parameter_file, "EAGLECooling:He_reion_z_sigma");
+  cooling->He_reion_heat_cgs =
+      parser_get_param_float(parameter_file, "EAGLECooling:He_reion_eV_p_H");
+
+  /* Optional parameters to correct the abundances */
+  cooling->Ca_over_Si_ratio_in_solar = parser_get_opt_param_float(
+      parameter_file, "EAGLECooling:Ca_over_Si_in_solar", 1.f);
+  cooling->S_over_Si_ratio_in_solar = parser_get_opt_param_float(
+      parameter_file, "EAGLECooling:S_over_Si_in_solar", 1.f);
+
+  /* Convert to cgs (units used internally by the cooling routines) */
+  cooling->He_reion_heat_cgs *=
+      phys_const->const_electron_volt *
+      units_cgs_conversion_factor(us, UNIT_CONV_ENERGY);
+
+  /* Read in the list of redshifts */
+  get_cooling_redshifts(cooling);
+
+  /* Read in cooling table header */
+  char fname[eagle_table_path_name_length + 12];
+  sprintf(fname, "%sz_0.000.hdf5", cooling->cooling_table_path);
+  read_cooling_header(fname, cooling);
+
+  /* Allocate space for cooling tables */
+  allocate_cooling_tables(cooling);
+
+  /* Compute conversion factors */
+  cooling->internal_energy_to_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+  cooling->internal_energy_from_cgs = 1. / cooling->internal_energy_to_cgs;
+  cooling->number_density_to_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_NUMBER_DENSITY);
+
+  /* Store some constants in CGS units */
+  const double proton_mass_cgs =
+      phys_const->const_proton_mass *
+      units_cgs_conversion_factor(us, UNIT_CONV_MASS);
+  cooling->inv_proton_mass_cgs = 1. / proton_mass_cgs;
+  cooling->T_CMB_0 = phys_const->const_T_CMB_0 *
+                     units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE);
+
+  /* Compute the coefficient at the front of the Compton cooling expression */
+  const double radiation_constant =
+      4. * phys_const->const_stefan_boltzmann / phys_const->const_speed_light_c;
+  const double compton_coefficient =
+      4. * radiation_constant * phys_const->const_thomson_cross_section *
+      phys_const->const_boltzmann_k /
+      (phys_const->const_electron_mass * phys_const->const_speed_light_c);
+  const float dimension_coefficient[5] = {1, 2, -3, 0, -5};
+
+  /* This should be ~1.0178085e-37 g cm^2 s^-3 K^-5 */
+  const double compton_coefficient_cgs =
+      compton_coefficient *
+      units_general_cgs_conversion_factor(us, dimension_coefficient);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  const double expected_compton_coefficient_cgs = 1.0178085e-37;
+  if (fabs(compton_coefficient_cgs - expected_compton_coefficient_cgs) /
+          expected_compton_coefficient_cgs >
+      0.01)
+    error("compton coefficient incorrect.");
+#endif
+
+  /* And now the Compton rate */
+  cooling->compton_rate_cgs = compton_coefficient_cgs * cooling->T_CMB_0 *
+                              cooling->T_CMB_0 * cooling->T_CMB_0 *
+                              cooling->T_CMB_0;
+
+  /* Set the redshift indices to invalid values */
+  cooling->z_index = -10;
+
+  /* Set previous_z_index to the last value of the redshift table */
+  cooling->previous_z_index = eagle_cooling_N_redshifts - 2;
+
+  /* Check if we are running with the newton scheme */
+  cooling->newton_flag = parser_get_opt_param_int(
+      parameter_file, "EAGLECooling:newton_integration", 0);
+}
+
+/**
+ * @brief Restore cooling tables (if applicable) after
+ * restart
+ *
+ * @param cooling the #cooling_function_data structure
+ * @param cosmo #cosmology structure
+ */
+void cooling_restore_tables(struct cooling_function_data *cooling,
+                            const struct cosmology *cosmo) {
+
+  /* Read redshifts */
+  get_cooling_redshifts(cooling);
+
+  /* Read cooling header */
+  char fname[eagle_table_path_name_length + 12];
+  sprintf(fname, "%sz_0.000.hdf5", cooling->cooling_table_path);
+  read_cooling_header(fname, cooling);
+
+  /* Allocate memory for the tables */
+  allocate_cooling_tables(cooling);
+
+  /* Force a re-read of the cooling tables */
+  cooling->z_index = -10;
+  cooling->previous_z_index = eagle_cooling_N_redshifts - 2;
+  cooling_update(cosmo, cooling);
+}
+
+/**
+ * @brief Prints the properties of the cooling model to stdout.
+ *
+ * @param cooling #cooling_function_data struct.
+ */
+void cooling_print_backend(const struct cooling_function_data *cooling) {
+
+  message("Cooling function is 'EAGLE'.");
+}
+
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * We simply free all the arrays.
+ *
+ * @param cooling the cooling data structure.
+ */
+void cooling_clean(struct cooling_function_data *cooling) {
+
+  /* Free the side arrays */
+  free(cooling->Redshifts);
+  free(cooling->nH);
+  free(cooling->Temp);
+  free(cooling->HeFrac);
+  free(cooling->Therm);
+  free(cooling->SolarAbundances);
+  free(cooling->SolarAbundances_inv);
+
+  /* Free the tables */
+  free(cooling->table.metal_heating);
+  free(cooling->table.electron_abundance);
+  free(cooling->table.temperature);
+  free(cooling->table.H_plus_He_heating);
+  free(cooling->table.H_plus_He_electron_abundance);
+}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+void cooling_struct_dump(const struct cooling_function_data *cooling,
+                         FILE *stream) {
+
+  /* To make sure everything is restored correctly, we zero all the pointers to
+     tables. If they are not restored correctly, we would crash after restart on
+     the first call to the cooling routines. Helps debugging. */
+  struct cooling_function_data cooling_copy = *cooling;
+  cooling_copy.Redshifts = NULL;
+  cooling_copy.nH = NULL;
+  cooling_copy.Temp = NULL;
+  cooling_copy.Therm = NULL;
+  cooling_copy.SolarAbundances = NULL;
+  cooling_copy.SolarAbundances_inv = NULL;
+  cooling_copy.table.metal_heating = NULL;
+  cooling_copy.table.H_plus_He_heating = NULL;
+  cooling_copy.table.H_plus_He_electron_abundance = NULL;
+  cooling_copy.table.temperature = NULL;
+  cooling_copy.table.electron_abundance = NULL;
+
+  restart_write_blocks((void *)&cooling_copy,
+                       sizeof(struct cooling_function_data), 1, stream,
+                       "cooling", "cooling function");
+}
+
+/**
+ * @brief Restore a cooling_function_data struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Read the structure from the stream and restore the cooling tables by
+ * re-reading them.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+void cooling_struct_restore(struct cooling_function_data *cooling, FILE *stream,
+                            const struct cosmology *cosmo) {
+  restart_read_blocks((void *)cooling, sizeof(struct cooling_function_data), 1,
+                      stream, NULL, "cooling function");
+
+  cooling_restore_tables(cooling, cosmo);
+}
diff --git a/src/cooling/EAGLE/cooling.h b/src/cooling/EAGLE/cooling.h
index 1c56572856a88d763d5ef7ca77e14d378891a264..d95c75e58aecfd8fb4816f1e50c7a3f379a08e51 100644
--- a/src/cooling/EAGLE/cooling.h
+++ b/src/cooling/EAGLE/cooling.h
@@ -21,112 +21,65 @@
 
 /**
  * @file src/cooling/EAGLE/cooling.h
- * @brief EAGLE cooling function
+ * @brief EAGLE cooling function declarations
  */
 
-/* Config parameters. */
-#include "../config.h"
+/* Local includes. */
+#include "cooling_struct.h"
 
-/* Some standard headers. */
-#include <float.h>
-#include <math.h>
+struct part;
+struct xpart;
+struct cosmology;
+struct hydro_props;
+struct entropy_floor_properties;
 
-/* Local includes. */
-#include "error.h"
-#include "hydro.h"
-#include "parser.h"
-#include "part.h"
-#include "physical_constants.h"
-#include "units.h"
+void cooling_update(const struct cosmology *cosmo,
+                    struct cooling_function_data *cooling);
 
-/**
- * @brief Apply the cooling function to a particle.
- *
- * @param phys_const The physical constants in internal units.
- * @param us The internal system of units.
- * @param cosmo The current cosmological model.
- * @param cooling The #cooling_function_data used in the run.
- * @param p Pointer to the particle data.
- * @param xp Pointer to the extended particle data.
- * @param dt The time-step of this particle.
- */
-__attribute__((always_inline)) INLINE static void cooling_cool_part(
-    const struct phys_const* restrict phys_const,
-    const struct unit_system* restrict us,
-    const struct cosmology* restrict cosmo,
-    const struct cooling_function_data* restrict cooling,
-    struct part* restrict p, struct xpart* restrict xp, float dt) {}
+void cooling_cool_part(const struct phys_const *phys_const,
+                       const struct unit_system *us,
+                       const struct cosmology *cosmo,
+                       const struct hydro_props *hydro_properties,
+                       const struct entropy_floor_properties *floor_props,
+                       const struct cooling_function_data *cooling,
+                       struct part *restrict p, struct xpart *restrict xp,
+                       const float dt, const float dt_therm);
 
-/**
- * @brief Computes the cooling time-step.
- *
- * @param cooling The #cooling_function_data used in the run.
- * @param phys_const The physical constants in internal units.
- * @param us The internal system of units.
- * @param cosmo The current cosmological model.
- * @param p Pointer to the particle data.
- */
-__attribute__((always_inline)) INLINE static float cooling_timestep(
-    const struct cooling_function_data* restrict cooling,
-    const struct phys_const* restrict phys_const,
-    const struct cosmology* restrict cosmo,
-    const struct unit_system* restrict us, const struct part* restrict p) {
+float cooling_timestep(const struct cooling_function_data *restrict cooling,
+                       const struct phys_const *restrict phys_const,
+                       const struct cosmology *restrict cosmo,
+                       const struct unit_system *restrict us,
+                       const struct hydro_props *hydro_props,
+                       const struct part *restrict p,
+                       const struct xpart *restrict xp);
 
-  return FLT_MAX;
-}
+void cooling_first_init_part(
+    const struct phys_const *restrict phys_const,
+    const struct unit_system *restrict us,
+    const struct cosmology *restrict cosmo,
+    const struct cooling_function_data *restrict cooling,
+    const struct part *restrict p, struct xpart *restrict xp);
 
-/**
- * @brief Sets the cooling properties of the (x-)particles to a valid start
- * state.
- *
- * @param phys_const The physical constants in internal units.
- * @param us The internal system of units.
- * @param cosmo The current cosmological model.
- * @param cooling The properties of the cooling function.
- * @param p Pointer to the particle data.
- * @param xp Pointer to the extended particle data.
- */
-__attribute__((always_inline)) INLINE static void cooling_first_init_part(
-    const struct phys_const* restrict phys_const,
-    const struct unit_system* restrict us,
-    const struct cosmology* restrict cosmo,
-    const struct cooling_function_data* restrict cooling,
-    const struct part* restrict p, struct xpart* restrict xp) {}
+float cooling_get_temperature(
+    const struct phys_const *restrict phys_const,
+    const struct hydro_props *restrict hydro_props,
+    const struct unit_system *restrict us,
+    const struct cosmology *restrict cosmo,
+    const struct cooling_function_data *restrict cooling,
+    const struct part *restrict p, const struct xpart *restrict xp);
 
-/**
- * @brief Returns the total radiated energy by this particle.
- *
- * @param xp The extended particle data
- */
-__attribute__((always_inline)) INLINE static float cooling_get_radiated_energy(
-    const struct xpart* restrict xp) {
+float cooling_get_radiated_energy(const struct xpart *restrict xp);
 
-  return 0.f;
-}
+void cooling_init_backend(struct swift_params *parameter_file,
+                          const struct unit_system *us,
+                          const struct phys_const *phys_const,
+                          struct cooling_function_data *cooling);
 
-/**
- * @brief Initialises the cooling properties.
- *
- * @param parameter_file The parsed parameter file.
- * @param us The current internal system of units.
- * @param phys_const The physical constants in internal units.
- * @param cooling The cooling properties to initialize
- */
-static INLINE void cooling_init_backend(struct swift_params* parameter_file,
-                                        const struct unit_system* us,
-                                        const struct phys_const* phys_const,
-                                        struct cooling_function_data* cooling) {
-}
+void cooling_restore_tables(struct cooling_function_data *cooling,
+                            const struct cosmology *cosmo);
 
-/**
- * @brief Prints the properties of the cooling model to stdout.
- *
- * @param cooling The properties of the cooling function.
- */
-static INLINE void cooling_print_backend(
-    const struct cooling_function_data* cooling) {
+void cooling_print_backend(const struct cooling_function_data *cooling);
 
-  message("Cooling function is 'EAGLE'.");
-}
+void cooling_clean(struct cooling_function_data *data);
 
 #endif /* SWIFT_COOLING_EAGLE_H */
diff --git a/src/cooling/EAGLE/cooling_io.h b/src/cooling/EAGLE/cooling_io.h
index f98539605de5c231a821758e9bd8fdb89bd19a59..5508153afc094d84383893f55ac0362a6d427b24 100644
--- a/src/cooling/EAGLE/cooling_io.h
+++ b/src/cooling/EAGLE/cooling_io.h
@@ -23,24 +23,36 @@
 #include "../config.h"
 
 /* Local includes */
+#include "cooling.h"
 #include "io_properties.h"
 
 #ifdef HAVE_HDF5
 
 /**
- * @brief Writes the current model of SPH to the file
- * @param h_grpsph The HDF5 group in which to write
+ * @brief Writes the current model of cooling to the file.
+ *
+ * @param h_grp The HDF5 group in which to write
+ * @param cooling The #cooling_function_data
  */
 __attribute__((always_inline)) INLINE static void cooling_write_flavour(
-    hid_t h_grpsph) {
+    hid_t h_grp, const struct cooling_function_data* cooling) {
 
-  io_write_attribute_s(h_grpsph, "Cooling Model", "EAGLE");
+  io_write_attribute_s(h_grp, "Cooling Model", "EAGLE");
 }
 #endif
 
+INLINE static void convert_part_T(const struct engine* e, const struct part* p,
+                                  const struct xpart* xp, float* ret) {
+
+  ret[0] = cooling_get_temperature(e->physical_constants, e->hydro_properties,
+                                   e->internal_units, e->cosmology,
+                                   e->cooling_func, p, xp);
+}
+
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
+ * @param parts The particle array.
  * @param xparts The extended data particle array.
  * @param list The list of i/o properties to write.
  * @param cooling The #cooling_function_data
@@ -48,9 +60,13 @@ __attribute__((always_inline)) INLINE static void cooling_write_flavour(
  * @return Returns the number of fields to write.
  */
 __attribute__((always_inline)) INLINE static int cooling_write_particles(
-    const struct xpart* xparts, struct io_props* list,
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
     const struct cooling_function_data* cooling) {
-  return 0;
+
+  list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1,
+                                              UNIT_CONV_TEMPERATURE, parts,
+                                              xparts, convert_part_T);
+  return 1;
 }
 
 #endif /* SWIFT_COOLING_EAGLE_IO_H */
diff --git a/src/cooling/EAGLE/cooling_rates.h b/src/cooling/EAGLE/cooling_rates.h
new file mode 100644
index 0000000000000000000000000000000000000000..d315a5ba339956828505c5f48165abdd2b2e0486
--- /dev/null
+++ b/src/cooling/EAGLE/cooling_rates.h
@@ -0,0 +1,764 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#ifndef SWIFT_EAGLE_COOLING_RATES_H
+#define SWIFT_EAGLE_COOLING_RATES_H
+
+#include "../config.h"
+
+/* Local includes. */
+#include "cooling_tables.h"
+#include "exp10.h"
+#include "interpolate.h"
+
+/**
+ * @brief Compute ratio of mass fraction to solar mass fraction
+ * for each element carried by a given particle.
+ *
+ * The solar abundances are taken from the tables themselves.
+ *
+ * The EAGLE chemistry model does not track S and Ca. We assume
+ * that their abundance with respect to solar is the same as
+ * the ratio for Si.
+ * We optionally apply a correction if the user asked for a different
+ * ratio.
+ *
+ * We also re-order the elements such that they match the order of the
+ * tables. This is [H, He, C, N, O, Ne, Mg, Si, S, Ca, Fe].
+ *
+ * The solar abundances table (from the cooling struct) is arranged as
+ * [H, He, C, N, O, Ne, Mg, Si, S, Ca, Fe].
+ *
+ * @param p Pointer to #part struct.
+ * @param cooling #cooling_function_data struct.
+ * @param ratio_solar (return) Array of ratios to solar abundances.
+ */
+__attribute__((always_inline)) INLINE void abundance_ratio_to_solar(
+    const struct part *p, const struct cooling_function_data *cooling,
+    float ratio_solar[chemistry_element_count + 2]) {
+
+  ratio_solar[0] = p->chemistry_data.metal_mass_fraction[chemistry_element_H] *
+                   cooling->SolarAbundances_inv[0 /* H */];
+
+  ratio_solar[1] = p->chemistry_data.metal_mass_fraction[chemistry_element_He] *
+                   cooling->SolarAbundances_inv[1 /* He */];
+
+  ratio_solar[2] = p->chemistry_data.metal_mass_fraction[chemistry_element_C] *
+                   cooling->SolarAbundances_inv[2 /* C */];
+
+  ratio_solar[3] = p->chemistry_data.metal_mass_fraction[chemistry_element_N] *
+                   cooling->SolarAbundances_inv[3 /* N */];
+
+  ratio_solar[4] = p->chemistry_data.metal_mass_fraction[chemistry_element_O] *
+                   cooling->SolarAbundances_inv[4 /* O */];
+
+  ratio_solar[5] = p->chemistry_data.metal_mass_fraction[chemistry_element_Ne] *
+                   cooling->SolarAbundances_inv[5 /* Ne */];
+
+  ratio_solar[6] = p->chemistry_data.metal_mass_fraction[chemistry_element_Mg] *
+                   cooling->SolarAbundances_inv[6 /* Mg */];
+
+  ratio_solar[7] = p->chemistry_data.metal_mass_fraction[chemistry_element_Si] *
+                   cooling->SolarAbundances_inv[7 /* Si */];
+
+  /* For S, we use the same ratio as Si */
+  ratio_solar[8] = p->chemistry_data.metal_mass_fraction[chemistry_element_Si] *
+                   cooling->SolarAbundances_inv[7 /* Si */] *
+                   cooling->S_over_Si_ratio_in_solar;
+
+  /* For Ca, we use the same ratio as Si */
+  ratio_solar[9] = p->chemistry_data.metal_mass_fraction[chemistry_element_Si] *
+                   cooling->SolarAbundances_inv[7 /* Si */] *
+                   cooling->Ca_over_Si_ratio_in_solar;
+
+  ratio_solar[10] =
+      p->chemistry_data.metal_mass_fraction[chemistry_element_Fe] *
+      cooling->SolarAbundances_inv[10 /* Fe */];
+}
+
+/**
+ * @brief Computes the extra heat from Helium reionisation at a given redshift.
+ *
+ * We follow the implementation of Wiersma et al. 2009, MNRAS, 399, 574-600,
+ * section. 2. The calculation returns energy in CGS.
+ *
+ * Note that delta_z is negative.
+ *
+ * @param z The current redshift.
+ * @param delta_z The change in redshift over the course of this time-step.
+ * @param cooling The #cooling_function_data used in the run.
+ * @return Helium reionization energy in CGS units.
+ */
+__attribute__((always_inline)) INLINE double
+eagle_helium_reionization_extraheat(
+    double z, double delta_z, const struct cooling_function_data *cooling) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (delta_z > 0.f) error("Invalid value for delta_z. Should be negative.");
+#endif
+
+  /* Recover the values we need */
+  const double z_centre = cooling->He_reion_z_centre;
+  const double z_sigma = cooling->He_reion_z_sigma;
+  const double heat_cgs = cooling->He_reion_heat_cgs;
+
+  double extra_heat = 0.;
+
+  /* Integral of the Gaussian between z and z - delta_z */
+  extra_heat += erf((z - delta_z - z_centre) / (M_SQRT2 * z_sigma));
+  extra_heat -= erf((z - z_centre) / (M_SQRT2 * z_sigma));
+
+  /* Multiply by the normalisation factor */
+  extra_heat *= heat_cgs * 0.5;
+
+  return extra_heat;
+}
+
+/**
+ * @brief Computes the log_10 of the temperature corresponding to a given
+ * internal energy, hydrogen number density, Helium fraction and redshift.
+ *
+ * Note that the redshift is implicitly passed in via the currently loaded
+ * tables in the #cooling_function_data.
+ *
+ * For the low-z case, we interpolate the flattened 4D table 'u_to_temp' that
+ * is arranged in the following way:
+ * - 1st dim: redshift, length = eagle_cooling_N_loaded_redshifts
+ * - 2nd dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 3rd dim: Helium fraction, length = eagle_cooling_N_He_frac
+ * - 4th dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * For the high-z case, we interpolate the flattened 3D table 'u_to_temp' that
+ * is arranged in the following way:
+ * - 1st dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 2nd dim: Helium fraction, length = eagle_cooling_N_He_frac
+ * - 3rd dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * @param log_10_u_cgs Log base 10 of internal energy in cgs.
+ * @param redshift Current redshift.
+ * @param n_H_index Index along the Hydrogen density dimension.
+ * @param He_index Index along the Helium fraction dimension.
+ * @param d_n_H Offset between Hydrogen density and table[n_H_index].
+ * @param d_He Offset between helium fraction and table[He_index].
+ * @param cooling #cooling_function_data structure.
+ *
+ * @param compute_dT_du Do we want to compute dT/du ?
+ * @param dT_du (return) The value of dT/du
+ *
+ * @return log_10 of the temperature.
+ */
+__attribute__((always_inline)) INLINE double eagle_convert_u_to_temp(
+    const double log_10_u_cgs, const float redshift, const int compute_dT_du,
+    float *dT_du, int n_H_index, int He_index, float d_n_H, float d_He,
+    const struct cooling_function_data *restrict cooling) {
+
+  /* Get index of u along the internal energy axis */
+  int u_index;
+  float d_u;
+  get_index_1d(cooling->Therm, eagle_cooling_N_temperature, log_10_u_cgs,
+               &u_index, &d_u);
+
+  /* Interpolate temperature table to return temperature for current
+   * internal energy (use 3D interpolation for high redshift table,
+   * otherwise 4D) */
+  float log_10_T;
+  if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+    log_10_T = interpolation_3d(cooling->table.temperature,   /* */
+                                n_H_index, He_index, u_index, /* */
+                                d_n_H, d_He, d_u,             /* */
+                                eagle_cooling_N_density,      /* */
+                                eagle_cooling_N_He_frac,      /* */
+                                eagle_cooling_N_temperature); /* */
+  } else {
+
+    log_10_T =
+        interpolation_4d(cooling->table.temperature,                  /* */
+                         /*z_index=*/0, n_H_index, He_index, u_index, /* */
+                         cooling->dz, d_n_H, d_He, d_u,               /* */
+                         eagle_cooling_N_loaded_redshifts,            /* */
+                         eagle_cooling_N_density,                     /* */
+                         eagle_cooling_N_He_frac,                     /* */
+                         eagle_cooling_N_temperature);                /* */
+  }
+
+  if (compute_dT_du) {
+
+    float log_10_T_high, log_10_T_low;
+
+    /* Interpolate temperature table to return temperature for internal energy
+     * at grid point above current internal energy for computing dT_du used for
+     * calculation of dlambda_du in cooling.c (use 3D interpolation for high
+     * redshift table, otherwise 4D) */
+    if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+      log_10_T_high = interpolation_3d(cooling->table.temperature,   /* */
+                                       n_H_index, He_index, u_index, /* */
+                                       d_n_H, d_He, /*delta_u=*/1.f, /* */
+                                       eagle_cooling_N_density,      /* */
+                                       eagle_cooling_N_He_frac,      /* */
+                                       eagle_cooling_N_temperature); /* */
+
+    } else {
+
+      log_10_T_high =
+          interpolation_4d(cooling->table.temperature,                  /* */
+                           /*z_index=*/0, n_H_index, He_index, u_index, /* */
+                           cooling->dz, d_n_H, d_He, /*delta_u=*/1.f,   /* */
+                           eagle_cooling_N_loaded_redshifts,            /* */
+                           eagle_cooling_N_density,                     /* */
+                           eagle_cooling_N_He_frac,                     /* */
+                           eagle_cooling_N_temperature);                /* */
+    }
+
+    /* Interpolate temperature table to return temperature for internal energy
+     * at grid point below current internal energy for computing dT_du used for
+     * calculation of dlambda_du in cooling.c (use 3D interpolation for high
+     * redshift table, otherwise 4D) */
+    if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+      log_10_T_low = interpolation_3d(cooling->table.temperature,   /* */
+                                      n_H_index, He_index, u_index, /* */
+                                      d_n_H, d_He, /*delta_u=*/0.f, /* */
+                                      eagle_cooling_N_density,      /* */
+                                      eagle_cooling_N_He_frac,      /* */
+                                      eagle_cooling_N_temperature); /* */
+
+    } else {
+
+      log_10_T_low =
+          interpolation_4d(cooling->table.temperature,                  /* */
+                           /*z_index=*/0, n_H_index, He_index, u_index, /* */
+                           cooling->dz, d_n_H, d_He, /*delta_u=*/0.f,   /* */
+                           eagle_cooling_N_loaded_redshifts,            /* */
+                           eagle_cooling_N_density,                     /* */
+                           eagle_cooling_N_He_frac,                     /* */
+                           eagle_cooling_N_temperature);                /* */
+    }
+
+    /* Calculate dT/du */
+    const float delta_u = exp(cooling->Therm[u_index + 1] * M_LN10) -
+                          exp(cooling->Therm[u_index] * M_LN10);
+    *dT_du =
+        (exp(M_LN10 * log_10_T_high) - exp(M_LN10 * log_10_T_low)) / delta_u;
+  }
+
+  /* Special case for temperatures below the start of the table */
+  if (u_index == 0 && d_u == 0.f) {
+
+    /* The temperature is multiplied by u / 10^T[0]
+     * where T[0] is the first entry in the table */
+    log_10_T += log_10_u_cgs - cooling->Temp[0];
+  }
+
+  return log_10_T;
+}
+
+/**
+ * @brief Compute the Compton cooling rate from the CMB at a given
+ * redshift, electron abundance, temperature and Hydrogen density.
+ *
+ * Uses an analytic formula.
+ *
+ * @param cooling The #cooling_function_data used in the run.
+ * @param redshift The current redshift.
+ * @param n_H_cgs The Hydrogen number density in CGS units.
+ * @param temperature The temperature.
+ * @param electron_abundance The electron abundance.
+ */
+__attribute__((always_inline)) INLINE double eagle_Compton_cooling_rate(
+    const struct cooling_function_data *cooling, const double redshift,
+    const double n_H_cgs, const double temperature,
+    const double electron_abundance) {
+
+  const double zp1 = 1. + redshift;
+  const double zp1p2 = zp1 * zp1;
+  const double zp1p4 = zp1p2 * zp1p2;
+
+  /* CMB temperature at this redshift */
+  const double T_CMB = cooling->T_CMB_0 * zp1;
+
+  /* Compton cooling rate */
+  return cooling->compton_rate_cgs * (temperature - T_CMB) * zp1p4 *
+         electron_abundance / n_H_cgs;
+}
+
+/**
+ * @brief Computes the cooling rate corresponding to a given internal energy,
+ * hydrogen number density, Helium fraction, redshift and metallicity from
+ * all the possible channels.
+ *
+ * 1) Metal-free cooling:
+ * We interpolate the flattened 4D table 'H_and_He_net_heating' that is
+ * arranged in the following way:
+ * - 1st dim: redshift, length = eagle_cooling_N_loaded_redshifts
+ * - 2nd dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 3rd dim: Helium fraction, length = eagle_cooling_N_He_frac
+ * - 4th dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * 2) Electron abundance
+ * We compute the electron abundance by interpolating the flattened 4d table
+ * 'H_and_He_electron_abundance' that is arranged in the following way:
+ * - 1st dim: redshift, length = eagle_cooling_N_loaded_redshifts
+ * - 2nd dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 3rd dim: Helium fraction, length = eagle_cooling_N_He_frac
+ * - 4th dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * 3) Compton cooling is applied via the analytic formula.
+ *
+ * 4) Solar electron abundance
+ * We compute the solar electron abundance by interpolating the flattened 3d
+ * table 'solar_electron_abundance' that is arranged in the following way:
+ * - 1st dim: redshift, length = eagle_cooling_N_loaded_redshifts
+ * - 2nd dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 3rd dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * 5) Metal-line cooling
+ * For each tracked element we interpolate the flattened 4D table
+ * 'table_metals_net_heating' that is arranged in the following way:
+ * - 1st dim: element, length = eagle_cooling_N_metal
+ * - 2nd dim: redshift, length = eagle_cooling_N_loaded_redshifts
+ * - 3rd dim: Hydrogen density, length = eagle_cooling_N_density
+ * - 4th dim: Internal energy, length = eagle_cooling_N_temperature
+ *
+ * Note that this is a fake 4D interpolation as we do not interpolate
+ * along the 1st dimension. We just do this once per element.
+ *
+ * Since only the temperature changes when cooling a given particle,
+ * the redshift, hydrogen number density and helium fraction indices
+ * and offsets are passed in.
+ *
+ * If the argument dlambda_du is non-NULL, the routine also
+ * calculates derivative of cooling rate with respect to internal
+ * energy.
+ *
+ * If the argument element_lambda is non-NULL, the routine also
+ * returns the cooling rate per element in the array.
+ *
+ * @param log10_u_cgs Log base 10 of internal energy per unit mass in CGS units.
+ * @param redshift The current redshift
+ * @param n_H_cgs The Hydrogen number density in CGS units.
+ * @param solar_ratio Array of ratios of particle metal abundances
+ * to solar metal abundances
+ *
+ * @param n_H_index Particle hydrogen number density index
+ * @param d_n_H Particle hydrogen number density offset
+ * @param He_index Particle helium fraction index
+ * @param d_He Particle helium fraction offset
+ * @param cooling Cooling data structure
+ *
+ * @param dlambda_du (return) Derivative of the cooling rate with respect to u.
+ * @param element_lambda (return) Cooling rate from each element
+ *
+ * @return The cooling rate
+ */
+INLINE static double eagle_metal_cooling_rate(
+    double log10_u_cgs, double redshift, double n_H_cgs,
+    const float solar_ratio[chemistry_element_count + 2], int n_H_index,
+    float d_n_H, int He_index, float d_He,
+    const struct cooling_function_data *restrict cooling, double *dlambda_du,
+    double *element_lambda) {
+
+#ifdef TO_BE_DONE
+  /* used for calculating dlambda_du */
+  double temp_lambda_high = 0, temp_lambda_low = 0;
+  double h_plus_he_electron_abundance_high = 0;
+  double h_plus_he_electron_abundance_low = 0;
+  double solar_electron_abundance_high = 0;
+  double solar_electron_abundance_low = 0;
+  double elem_cool_low = 0, elem_cool_high = 0;
+#endif
+
+  /* We only need dT_du if dLambda_du is non-NULL */
+  const int compute_dT_du = (dlambda_du != NULL) ? 1 : 0;
+
+  /* Temperature */
+  float dT_du = -1.f;
+  const double log_10_T =
+      eagle_convert_u_to_temp(log10_u_cgs, redshift, compute_dT_du, &dT_du,
+                              n_H_index, He_index, d_n_H, d_He, cooling);
+
+  /* Get index along temperature dimension of the tables */
+  int T_index;
+  float d_T;
+  get_index_1d(cooling->Temp, eagle_cooling_N_temperature, log_10_T, &T_index,
+               &d_T);
+
+#ifdef TO_BE_DONE
+  /* Difference between entries on the temperature table around u */
+  const float delta_T = exp(M_LN10 * cooling->Temp[T_index + 1]) -
+                        exp(M_LN10 * cooling->Temp[T_index]);
+#endif
+
+  /**********************/
+  /* Metal-free cooling */
+  /**********************/
+
+  double Lambda_free;
+
+  if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+    /* If we're using the high redshift tables then we don't interpolate
+     * in redshift */
+    Lambda_free = interpolation_3d(cooling->table.H_plus_He_heating, /* */
+                                   n_H_index, He_index, T_index,     /* */
+                                   d_n_H, d_He, d_T,                 /* */
+                                   eagle_cooling_N_density,          /* */
+                                   eagle_cooling_N_He_frac,          /* */
+                                   eagle_cooling_N_temperature);     /* */
+
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du. Pass in NULL pointer for
+     * dlambda_du in order to skip */
+    if (dlambda_du != NULL) {
+      temp_lambda_high = interpolation_3d(
+          cooling->table.H_plus_He_heating, n_H_index, He_index, T_index, d_n_h,
+          d_He, 1.f, cooling->N_nH, cooling->N_He, cooling->N_Temp);
+      temp_lambda_low = interpolation_3d(
+          cooling->table.H_plus_He_heating, n_H_index, He_index, T_index, d_n_h,
+          d_He, 0.f, cooling->N_nH, cooling->N_He, cooling->N_Temp);
+    }
+#endif
+
+  } else {
+
+    /* Using normal tables, have to interpolate in redshift */
+    Lambda_free =
+        interpolation_4d(cooling->table.H_plus_He_heating,            /* */
+                         /*z_index=*/0, n_H_index, He_index, T_index, /* */
+                         cooling->dz, d_n_H, d_He, d_T,               /* */
+                         eagle_cooling_N_loaded_redshifts,            /* */
+                         eagle_cooling_N_density,                     /* */
+                         eagle_cooling_N_He_frac,                     /* */
+                         eagle_cooling_N_temperature);                /* */
+
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du */
+    if (dlambda_du != NULL) {
+      temp_lambda_high =
+          interpolation_4d(cooling->table.H_plus_He_heating, 0, n_H_index,
+                           He_index, T_index, cooling->dz, d_n_h, d_He, 1.f, 2,
+                           cooling->N_nH, cooling->N_He, cooling->N_Temp);
+      temp_lambda_low =
+          interpolation_4d(cooling->table.H_plus_He_heating, 0, n_H_index,
+                           He_index, T_index, cooling->dz, d_n_h, d_He, 0.f, 2,
+                           cooling->N_nH, cooling->N_He, cooling->N_Temp);
+    }
+#endif
+  }
+
+#ifdef TO_BE_DONE
+  if (dlambda_du != NULL) {
+    *dlambda_du += (temp_lambda_high - temp_lambda_low) / delta_T * dT_du;
+  }
+#endif
+
+  /* If we're testing cooling rate contributions write to array */
+  if (element_lambda != NULL) {
+    element_lambda[0] = Lambda_free;
+  }
+
+  /**********************/
+  /* Electron abundance */
+  /**********************/
+
+  double H_plus_He_electron_abundance;
+
+  if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+    H_plus_He_electron_abundance =
+        interpolation_3d(cooling->table.H_plus_He_electron_abundance, /* */
+                         n_H_index, He_index, T_index,                /* */
+                         d_n_H, d_He, d_T,                            /* */
+                         eagle_cooling_N_density,                     /* */
+                         eagle_cooling_N_He_frac,                     /* */
+                         eagle_cooling_N_temperature);                /* */
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du. Pass in NULL pointer for
+     * dlambda_du in order to skip */
+
+    h_plus_he_electron_abundance_high =
+        interpolation_3d(cooling->table.H_plus_He_electron_abundance, n_H_index,
+                         He_index, T_index, d_n_h, d_He, 1.f, cooling->N_nH,
+                         cooling->N_He, cooling->N_Temp);
+    h_plus_he_electron_abundance_low =
+        interpolation_3d(cooling->table.H_plus_He_electron_abundance, n_H_index,
+                         He_index, T_index, d_n_h, d_He, 0.f, cooling->N_nH,
+                         cooling->N_He, cooling->N_Temp);
+
+#endif
+
+  } else {
+
+    H_plus_He_electron_abundance =
+        interpolation_4d(cooling->table.H_plus_He_electron_abundance, /* */
+                         /*z_index=*/0, n_H_index, He_index, T_index, /* */
+                         cooling->dz, d_n_H, d_He, d_T,               /* */
+                         eagle_cooling_N_loaded_redshifts,            /* */
+                         eagle_cooling_N_density,                     /* */
+                         eagle_cooling_N_He_frac,                     /* */
+                         eagle_cooling_N_temperature);                /* */
+
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du */
+    h_plus_he_electron_abundance_high =
+        interpolation_4d(cooling->table.H_plus_He_electron_abundance, 0,
+                         n_H_index, He_index, T_index, cooling->dz, d_n_h, d_He,
+                         1.f, 2, cooling->N_nH, cooling->N_He, cooling->N_Temp);
+    h_plus_he_electron_abundance_low =
+        interpolation_4d(cooling->table.H_plus_He_electron_abundance, 0,
+                         n_H_index, He_index, T_index, cooling->dz, d_n_h, d_He,
+                         0.f, 2, cooling->N_nH, cooling->N_He, cooling->N_Temp);
+#endif
+  }
+
+  /**********************/
+  /* Compton cooling    */
+  /**********************/
+
+  double Lambda_Compton = 0.;
+
+  /* Do we need to add the inverse Compton cooling? */
+  /* It is *not* stored in the tables before re-ionisation */
+  if ((redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) ||
+      (redshift > cooling->H_reion_z)) {
+
+    const double T = exp10(log_10_T);
+
+    /* Note the minus sign */
+    Lambda_Compton -= eagle_Compton_cooling_rate(cooling, redshift, n_H_cgs, T,
+                                                 H_plus_He_electron_abundance);
+  }
+
+  /* If we're testing cooling rate contributions write to array */
+  if (element_lambda != NULL) {
+    element_lambda[1] = Lambda_Compton;
+  }
+
+  /*******************************/
+  /* Solar electron abundance    */
+  /*******************************/
+
+  double solar_electron_abundance;
+
+  if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+    /* If we're using the high redshift tables then we don't interpolate
+     * in redshift */
+    solar_electron_abundance =
+        interpolation_2d(cooling->table.electron_abundance, /* */
+                         n_H_index, T_index,                /* */
+                         d_n_H, d_T,                        /* */
+                         eagle_cooling_N_density,           /* */
+                         eagle_cooling_N_temperature);      /* */
+
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du */
+    if (dlambda_du != NULL) {
+      solar_electron_abundance_high =
+          interpolation_2d(cooling->table.electron_abundance, n_H_index,
+                           T_index, d_n_h, 1.f, cooling->N_nH, cooling->N_Temp);
+      solar_electron_abundance_low =
+          interpolation_2d(cooling->table.electron_abundance, n_H_index,
+                           T_index, d_n_h, 0.f, cooling->N_nH, cooling->N_Temp);
+    }
+#endif
+
+  } else {
+
+    /* Using normal tables, have to interpolate in redshift */
+    solar_electron_abundance =
+        interpolation_3d(cooling->table.electron_abundance, /* */
+                         /*z_index=*/0, n_H_index, T_index, /* */
+                         cooling->dz, d_n_H, d_T,           /* */
+                         eagle_cooling_N_loaded_redshifts,  /* */
+                         eagle_cooling_N_density,           /* */
+                         eagle_cooling_N_temperature);      /* */
+
+#ifdef TO_BE_DONE
+    /* compute values at temperature gridpoints above and below input
+     * temperature for calculation of dlambda_du */
+    if (dlambda_du != NULL) {
+      solar_electron_abundance_high = interpolation_3d(
+          cooling->table.electron_abundance, 0, n_H_index, T_index, cooling->dz,
+          d_n_h, 1.f, 2, cooling->N_nH, cooling->N_Temp);
+      solar_electron_abundance_low = interpolation_3d(
+          cooling->table.electron_abundance, 0, n_H_index, T_index, cooling->dz,
+          d_n_h, 0.f, 2, cooling->N_nH, cooling->N_Temp);
+    }
+#endif
+  }
+
+  const double electron_abundance_ratio =
+      H_plus_He_electron_abundance / solar_electron_abundance;
+
+  /**********************/
+  /* Metal-line cooling */
+  /**********************/
+
+  /* for each element the cooling rate is multiplied by the ratio of H, He
+   * electron abundance to solar electron abundance then by the ratio of the
+   * particle metal abundance to solar metal abundance. */
+
+  double lambda_metal[eagle_cooling_N_metal + 2] = {0.};
+
+  if (redshift > cooling->Redshifts[eagle_cooling_N_redshifts - 1]) {
+
+    /* Loop over the metals (ignore H and He) */
+    for (int elem = 2; elem < eagle_cooling_N_metal + 2; elem++) {
+
+      if (solar_ratio[elem] > 0.) {
+
+        /* Note that we do not interpolate along the x-axis
+         * (element dimension) */
+        lambda_metal[elem] =
+            interpolation_3d_no_x(cooling->table.metal_heating,   /* */
+                                  elem - 2, n_H_index, T_index,   /* */
+                                  /*delta_elem=*/0.f, d_n_H, d_T, /* */
+                                  eagle_cooling_N_metal,          /* */
+                                  eagle_cooling_N_density,        /* */
+                                  eagle_cooling_N_temperature);   /* */
+
+        lambda_metal[elem] *= electron_abundance_ratio;
+        lambda_metal[elem] *= solar_ratio[elem];
+      }
+
+#ifdef TO_BE_DONE
+      /* compute values at temperature gridpoints above and below input
+       * temperature for calculation of dlambda_du */
+      if (dlambda_du != NULL) {
+        elem_cool_high = interpolation_3d_no_x(
+            cooling->table.metal_heating, elem, n_H_index, T_index, 0.f, d_n_h,
+            1.f, cooling->N_Elements, cooling->N_nH, cooling->N_Temp);
+
+        elem_cool_low = interpolation_3d_no_x(
+            cooling->table.metal_heating, elem, n_H_index, T_index, 0.f, d_n_h,
+            0.f, cooling->N_nH, cooling->N_Temp, cooling->N_Elements);
+
+        *dlambda_du += (elem_cool_high * h_plus_he_electron_abundance_high /
+                            solar_electron_abundance_high -
+                        elem_cool_low * h_plus_he_electron_abundance_low /
+                            solar_electron_abundance_low) /
+                       delta_T * dT_du * solar_ratio[elem + 2];
+      }
+#endif
+    }
+
+  } else {
+
+    /* Loop over the metals (ignore H and He) */
+    for (int elem = 2; elem < eagle_cooling_N_metal + 2; elem++) {
+
+      if (solar_ratio[elem] > 0.) {
+
+        /* Note that we do not interpolate along the x-axis
+         * (element dimension) */
+        lambda_metal[elem] = interpolation_4d_no_x(
+            cooling->table.metal_heating,                /* */
+            elem - 2, /*z_index=*/0, n_H_index, T_index, /* */
+            /*delta_elem=*/0.f, cooling->dz, d_n_H, d_T, /* */
+            eagle_cooling_N_metal,                       /* */
+            eagle_cooling_N_loaded_redshifts,            /* */
+            eagle_cooling_N_density,                     /* */
+            eagle_cooling_N_temperature);                /* */
+
+        lambda_metal[elem] *= electron_abundance_ratio;
+        lambda_metal[elem] *= solar_ratio[elem];
+      }
+
+#ifdef TO_BE_DONE
+      /* compute values at temperature gridpoints above and below input
+       * temperature for calculation of dlambda_du */
+      if (dlambda_du != NULL) {
+        elem_cool_high = interpolation_4d_no_x(
+            cooling->table.metal_heating, elem, 0, n_H_index, T_index, 0.,
+            cooling->dz, d_n_h, 1.f, cooling->N_Elements, 2, cooling->N_nH,
+            cooling->N_Temp);
+
+        elem_cool_low = interpolation_4d_no_x(
+            cooling->table.metal_heating, elem, 0, n_H_index, T_index, 0.,
+            cooling->dz, d_n_h, 0.f, cooling->N_Elements, 2, cooling->N_nH,
+            cooling->N_Temp);
+
+        *dlambda_du += (elem_cool_high * h_plus_he_electron_abundance_high /
+                            solar_electron_abundance_high -
+                        elem_cool_low * h_plus_he_electron_abundance_low /
+                            solar_electron_abundance_low) /
+                       delta_T * dT_du * solar_ratio[elem + 2];
+      }
+#endif
+    }
+  }
+
+  if (element_lambda != NULL) {
+    for (int elem = 2; elem < eagle_cooling_N_metal + 2; ++elem) {
+      element_lambda[elem] = lambda_metal[elem];
+    }
+  }
+
+  /* Sum up all the contributions */
+  double Lambda_net = Lambda_free + Lambda_Compton;
+  for (int elem = 2; elem < eagle_cooling_N_metal + 2; ++elem) {
+    Lambda_net += lambda_metal[elem];
+  }
+
+  return Lambda_net;
+}
+
+/**
+ * @brief Wrapper function used to calculate cooling rate and dLambda_du.
+ * Table indices and offsets for redshift, hydrogen number density and
+ * helium fraction are passed in so as to compute them only once per particle.
+ *
+ * @param log_u_cgs Natural log of internal energy per unit mass in CGS units.
+ * @param redshift The current redshift.
+ * @param n_H_cgs Hydrogen number density in CGS units.
+ * @param abundance_ratio Ratio of element abundance to solar.
+ *
+ * @param n_H_index Particle hydrogen number density index
+ * @param d_n_H Particle hydrogen number density offset
+ * @param He_index Particle helium fraction index
+ * @param d_He Particle helium fraction offset
+ * @param cooling #cooling_function_data structure
+ *
+ * @param dLambdaNet_du (return) Derivative of the cooling rate with respect to
+ * u.
+ *
+ * @return The cooling rate
+ */
+INLINE static double eagle_cooling_rate(
+    double log_u_cgs, double redshift, double n_H_cgs,
+    const float abundance_ratio[chemistry_element_count + 2], int n_H_index,
+    float d_n_H, int He_index, float d_He,
+    const struct cooling_function_data *restrict cooling,
+    double *dLambdaNet_du) {
+
+  return eagle_metal_cooling_rate(log_u_cgs / M_LN10, redshift, n_H_cgs,
+                                  abundance_ratio, n_H_index, d_n_H, He_index,
+                                  d_He, cooling, dLambdaNet_du,
+                                  /*element_lambda=*/NULL);
+}
+
+#endif /* SWIFT_EAGLE_COOLING_RATES_H */
diff --git a/src/cooling/EAGLE/cooling_struct.h b/src/cooling/EAGLE/cooling_struct.h
index 24c8b2088bf5b54134fde7a4a76ab3d2ae61c6ba..0922bf74461c222bd6485bdc07cc35edc462ddba 100644
--- a/src/cooling/EAGLE/cooling_struct.h
+++ b/src/cooling/EAGLE/cooling_struct.h
@@ -19,14 +19,119 @@
 #ifndef SWIFT_COOLING_STRUCT_EAGLE_H
 #define SWIFT_COOLING_STRUCT_EAGLE_H
 
+#define eagle_table_path_name_length 500
+
+/**
+ * @brief struct containing cooling tables
+ */
+struct cooling_tables {
+
+  /* array of heating rates due to metals */
+  float *metal_heating;
+
+  /* array of heating rates due to hydrogen and helium */
+  float *H_plus_He_heating;
+
+  /* array of electron abundances due to hydrogen and helium */
+  float *H_plus_He_electron_abundance;
+
+  /* array of temperatures */
+  float *temperature;
+
+  /* array of electron abundances due to metals */
+  float *electron_abundance;
+};
+
 /**
  * @brief Properties of the cooling function.
  */
-struct cooling_function_data {};
+struct cooling_function_data {
+
+  /*! Cooling tables */
+  struct cooling_tables table;
+
+  /*! Redshift bins */
+  float *Redshifts;
+
+  /*! Hydrogen number density bins */
+  float *nH;
+
+  /*! Temperature bins */
+  float *Temp;
+
+  /*! Helium fraction bins */
+  float *HeFrac;
+
+  /*! Internal energy bins */
+  float *Therm;
+
+  /*! Mass fractions of elements for solar abundances (from the tables) */
+  float *SolarAbundances;
+
+  /*! Inverse of the solar mass fractions */
+  float *SolarAbundances_inv;
+
+  /*! Filepath to the directory containing the HDF5 cooling tables */
+  char cooling_table_path[eagle_table_path_name_length];
+
+  /*! Redshift of H reionization */
+  float H_reion_z;
+
+  /*! Ca over Si abundance divided by the solar ratio for these elements */
+  float Ca_over_Si_ratio_in_solar;
+
+  /*! S over Si abundance divided by the solar ratio for these elements */
+  float S_over_Si_ratio_in_solar;
+
+  /*! Redshift of He reionization */
+  float He_reion_z_centre;
+
+  /*! Spread of the He reionization */
+  float He_reion_z_sigma;
+
+  /*! He reionization energy in CGS units */
+  float He_reion_heat_cgs;
+
+  /*! Internal energy conversion from internal units to CGS (for quick access)
+   */
+  double internal_energy_to_cgs;
+
+  /*! Internal energy conversion from CGS to internal units (for quick access)
+   */
+  double internal_energy_from_cgs;
+
+  /*! Number density conversion from internal units to CGS (for quick access) */
+  double number_density_to_cgs;
+
+  /*! Inverse of proton mass in cgs (for quick access) */
+  double inv_proton_mass_cgs;
+
+  /*! Temperature of the CMB at present day (for quick access) */
+  double T_CMB_0;
+
+  /*! Compton rate in cgs units */
+  double compton_rate_cgs;
+
+  /*! Index of the current redshift along the redshift index of the tables */
+  int z_index;
+
+  /*! Distance between the current redshift and table[z_index] */
+  float dz;
+
+  /*! Index of the previous tables along the redshift index of the tables */
+  int previous_z_index;
+
+  /*! Are we doing Newton-Raphson iterations? */
+  int newton_flag;
+};
 
 /**
  * @brief Properties of the cooling stored in the extended particle data.
  */
-struct cooling_xpart_data {};
+struct cooling_xpart_data {
+
+  /*! Cumulative energy radiated by the particle */
+  float radiated_energy;
+};
 
 #endif /* SWIFT_COOLING_STRUCT_EAGLE_H */
diff --git a/src/cooling/EAGLE/cooling_tables.c b/src/cooling/EAGLE/cooling_tables.c
new file mode 100644
index 0000000000000000000000000000000000000000..c66b7ebb8f8bea4aac557fe3b7f24f944014deda
--- /dev/null
+++ b/src/cooling/EAGLE/cooling_tables.c
@@ -0,0 +1,757 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/**
+ * @file src/cooling/EAGLE/cooling_tables.c
+ * @brief Functions to read EAGLE tables
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+#include <hdf5.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Local includes. */
+#include "chemistry_struct.h"
+#include "cooling_struct.h"
+#include "cooling_tables.h"
+#include "error.h"
+#include "interpolate.h"
+
+/**
+ * @brief Names of the elements in the order they are stored in the files
+ */
+static const char *eagle_tables_element_names[eagle_cooling_N_metal] = {
+    "Carbon",  "Nitrogen", "Oxygen",  "Neon", "Magnesium",
+    "Silicon", "Sulphur",  "Calcium", "Iron"};
+
+/*! Number of elements in a z-slice of a single element's cooling rate table */
+static const size_t num_elements_cooling_rate =
+    eagle_cooling_N_temperature * eagle_cooling_N_density;
+
+/*! Number of elements in a z-slice of the metal cooling rate tables */
+static const size_t num_elements_metal_heating = eagle_cooling_N_metal *
+                                                 eagle_cooling_N_temperature *
+                                                 eagle_cooling_N_density;
+
+/*! Number of elements in a z-slice of the metal electron abundance tables */
+static const size_t num_elements_electron_abundance =
+    eagle_cooling_N_temperature * eagle_cooling_N_density;
+
+/*! Number of elements in a z-slice of the temperature tables */
+static const size_t num_elements_temperature = eagle_cooling_N_He_frac *
+                                               eagle_cooling_N_temperature *
+                                               eagle_cooling_N_density;
+
+/*! Number of elements in a z-slice of the H+He cooling rate tables */
+static const size_t num_elements_HpHe_heating = eagle_cooling_N_He_frac *
+                                                eagle_cooling_N_temperature *
+                                                eagle_cooling_N_density;
+
+/*! Number of elements in a z-slice of the H+He electron abundance tables */
+static const size_t num_elements_HpHe_electron_abundance =
+    eagle_cooling_N_He_frac * eagle_cooling_N_temperature *
+    eagle_cooling_N_density;
+
+/**
+ * @brief Reads in EAGLE table of redshift values
+ *
+ * @param cooling #cooling_function_data structure
+ */
+void get_cooling_redshifts(struct cooling_function_data *cooling) {
+
+  /* Read the list of table redshifts */
+  char redshift_filename[eagle_table_path_name_length + 16];
+  sprintf(redshift_filename, "%s/redshifts.dat", cooling->cooling_table_path);
+
+  FILE *infile = fopen(redshift_filename, "r");
+  if (infile == NULL) {
+    error("Cannot open the list of cooling table redshifts (%s)",
+          redshift_filename);
+  }
+
+  int N_Redshifts = -1;
+
+  /* Read the file */
+  if (!feof(infile)) {
+
+    char buffer[50];
+
+    /* Read the number of redshifts (1st line in the file) */
+    if (fgets(buffer, 50, infile) != NULL)
+      N_Redshifts = atoi(buffer);
+    else
+      error("Impossible to read the number of redshifts");
+
+    /* Be verbose about it */
+    message("Found cooling tables at %d redhsifts", N_Redshifts);
+
+    /* Check value */
+    if (N_Redshifts != eagle_cooling_N_redshifts)
+      error("Invalid redshift lenght array.");
+
+    /* Allocate the list of redshifts */
+    if (posix_memalign((void **)&cooling->Redshifts, SWIFT_STRUCT_ALIGNMENT,
+                       eagle_cooling_N_redshifts * sizeof(float)) != 0)
+      error("Failed to allocate redshift table");
+
+    /* Read all the redshift values */
+    int count = 0;
+    while (!feof(infile)) {
+      if (fgets(buffer, 50, infile) != NULL) {
+        cooling->Redshifts[count] = atof(buffer);
+        count++;
+      }
+    }
+
+    /* Verify that the file was self-consistent */
+    if (count != N_Redshifts) {
+      error(
+          "Redshift file (%s) does not contain the correct number of redshifts "
+          "(%d vs. %d)",
+          redshift_filename, count, N_Redshifts);
+    }
+  } else {
+    error("Redshift file (%s) is empty!", redshift_filename);
+  }
+
+  /* We are done with this file */
+  fclose(infile);
+
+  /* EAGLE cooling assumes cooling->Redshifts table is in increasing order. Test
+   * this. */
+  for (int i = 0; i < N_Redshifts - 1; i++) {
+    if (cooling->Redshifts[i + 1] < cooling->Redshifts[i]) {
+      error("table should be in increasing order\n");
+    }
+  }
+}
+
+/**
+ * @brief Reads in EAGLE cooling table header. Consists of tables
+ * of values for temperature, hydrogen number density, helium fraction
+ * solar element abundances, and elements used to index the cooling tables.
+ *
+ * @param fname Filepath for cooling table from which to read header
+ * @param cooling Cooling data structure
+ */
+void read_cooling_header(const char *fname,
+                         struct cooling_function_data *cooling) {
+
+#ifdef HAVE_HDF5
+
+  int N_Temp, N_nH, N_He, N_SolarAbundances, N_Elements;
+
+  /* read sizes of array dimensions */
+  hid_t tempfile_id = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+  if (tempfile_id < 0) error("unable to open file %s\n", fname);
+
+  /* read size of each table of values */
+  hid_t dataset =
+      H5Dopen(tempfile_id, "/Header/Number_of_temperature_bins", H5P_DEFAULT);
+  herr_t status =
+      H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &N_Temp);
+  if (status < 0) error("error reading number of temperature bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Check value */
+  if (N_Temp != eagle_cooling_N_temperature)
+    error("Invalid temperature array length.");
+
+  dataset = H5Dopen(tempfile_id, "/Header/Number_of_density_bins", H5P_DEFAULT);
+  status =
+      H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &N_nH);
+  if (status < 0) error("error reading number of density bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Check value */
+  if (N_nH != eagle_cooling_N_density) error("Invalid density array length.");
+
+  dataset =
+      H5Dopen(tempfile_id, "/Header/Number_of_helium_fractions", H5P_DEFAULT);
+  status =
+      H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &N_He);
+  if (status < 0) error("error reading number of He fraction bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Check value */
+  if (N_He != eagle_cooling_N_He_frac)
+    error("Invalid Helium fraction array length.");
+
+  dataset = H5Dopen(tempfile_id, "/Header/Abundances/Number_of_abundances",
+                    H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   &N_SolarAbundances);
+  if (status < 0) error("error reading number of solar abundance bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Check value */
+  if (N_SolarAbundances != eagle_cooling_N_abundances)
+    error("Invalid solar abundances array length.");
+
+  /* Check value */
+  if (N_SolarAbundances != chemistry_element_count + 2)
+    error("Number of abundances not compatible with the chemistry model.");
+
+  dataset = H5Dopen(tempfile_id, "/Header/Number_of_metals", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   &N_Elements);
+  if (status < 0) error("error reading number of metal bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Check value */
+  if (N_Elements != eagle_cooling_N_metal) error("Invalid metal array length.");
+
+  /* allocate arrays of values for each of the above quantities */
+  if (posix_memalign((void **)&cooling->Temp, SWIFT_STRUCT_ALIGNMENT,
+                     N_Temp * sizeof(float)) != 0)
+    error("Failed to allocate temperature table");
+  if (posix_memalign((void **)&cooling->Therm, SWIFT_STRUCT_ALIGNMENT,
+                     N_Temp * sizeof(float)) != 0)
+    error("Failed to allocate internal energy table");
+  if (posix_memalign((void **)&cooling->nH, SWIFT_STRUCT_ALIGNMENT,
+                     N_nH * sizeof(float)) != 0)
+    error("Failed to allocate nH table");
+  if (posix_memalign((void **)&cooling->HeFrac, SWIFT_STRUCT_ALIGNMENT,
+                     N_He * sizeof(float)) != 0)
+    error("Failed to allocate HeFrac table");
+  if (posix_memalign((void **)&cooling->SolarAbundances, SWIFT_STRUCT_ALIGNMENT,
+                     N_SolarAbundances * sizeof(float)) != 0)
+    error("Failed to allocate Solar abundances table");
+  if (posix_memalign((void **)&cooling->SolarAbundances_inv,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     N_SolarAbundances * sizeof(float)) != 0)
+    error("Failed to allocate Solar abundances inverses table");
+
+  /* read in values for each of the arrays */
+  dataset = H5Dopen(tempfile_id, "/Solar/Temperature_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   cooling->Temp);
+  if (status < 0) error("error reading temperature bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  dataset = H5Dopen(tempfile_id, "/Solar/Hydrogen_density_bins", H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   cooling->nH);
+  if (status < 0) error("error reading H density bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  dataset = H5Dopen(tempfile_id, "/Metal_free/Helium_mass_fraction_bins",
+                    H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   cooling->HeFrac);
+  if (status < 0) error("error reading He fraction bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  dataset = H5Dopen(tempfile_id, "/Header/Abundances/Solar_mass_fractions",
+                    H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   cooling->SolarAbundances);
+  if (status < 0) error("error reading solar mass fraction bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  dataset = H5Dopen(tempfile_id, "/Metal_free/Temperature/Energy_density_bins",
+                    H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   cooling->Therm);
+  if (status < 0) error("error reading internal energy bins");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Convert to temperature, density and internal energy arrays to log10 */
+  for (int i = 0; i < N_Temp; i++) {
+    cooling->Temp[i] = log10(cooling->Temp[i]);
+    cooling->Therm[i] = log10(cooling->Therm[i]);
+  }
+  for (int i = 0; i < N_nH; i++) {
+    cooling->nH[i] = log10(cooling->nH[i]);
+  }
+  /* Compute inverse of solar mass fractions */
+  for (int i = 0; i < N_SolarAbundances; ++i) {
+    cooling->SolarAbundances_inv[i] = 1.f / cooling->SolarAbundances[i];
+  }
+
+#else
+  error("Need HDF5 to read cooling tables");
+#endif
+}
+
+/**
+ * @brief Allocate space for cooling tables.
+ *
+ * @param cooling #cooling_function_data structure
+ */
+void allocate_cooling_tables(struct cooling_function_data *restrict cooling) {
+
+  /* Allocate arrays to store cooling tables. Each array holds
+   * eagle_cooling_N_loaded_redshifts (= 2) slices of the table: one for the
+   * tabulated redshift above the current redshift and one for the one below. */
+
+  if (posix_memalign((void **)&cooling->table.metal_heating,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     eagle_cooling_N_loaded_redshifts *
+                         num_elements_metal_heating * sizeof(float)) != 0)
+    error("Failed to allocate metal_heating array");
+
+  if (posix_memalign((void **)&cooling->table.electron_abundance,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     eagle_cooling_N_loaded_redshifts *
+                         num_elements_electron_abundance * sizeof(float)) != 0)
+    error("Failed to allocate electron_abundance array");
+
+  if (posix_memalign((void **)&cooling->table.temperature,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     eagle_cooling_N_loaded_redshifts *
+                         num_elements_temperature * sizeof(float)) != 0)
+    error("Failed to allocate temperature array");
+
+  if (posix_memalign((void **)&cooling->table.H_plus_He_heating,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     eagle_cooling_N_loaded_redshifts *
+                         num_elements_HpHe_heating * sizeof(float)) != 0)
+    error("Failed to allocate H_plus_He_heating array");
+
+  if (posix_memalign((void **)&cooling->table.H_plus_He_electron_abundance,
+                     SWIFT_STRUCT_ALIGNMENT,
+                     eagle_cooling_N_loaded_redshifts *
+                         num_elements_HpHe_electron_abundance *
+                         sizeof(float)) != 0)
+    error("Failed to allocate H_plus_He_electron_abundance array");
+}
+
+/**
+ * @brief Get the redshift invariant table of cooling rates (before reionization
+ * at redshift ~9) Reads in table of cooling rates and electron abundances due
+ * to metals (depending on temperature, hydrogen number density), cooling rates
+ * and electron abundances due to hydrogen and helium (depending on temperature,
+ * hydrogen number density and helium fraction), and temperatures (depending on
+ * internal energy, hydrogen number density and helium fraction; note: this is
+ * distinct from table of temperatures read in ReadCoolingHeader, as that table
+ * is used to index the cooling, electron abundance tables, whereas this one is
+ * used to obtain temperature of particle)
+ *
+ * @param cooling #cooling_function_data structure
+ * @param photodis Are we loading the photo-dissociation table?
+ */
+void get_redshift_invariant_table(
+    struct cooling_function_data *restrict cooling, const int photodis) {
+#ifdef HAVE_HDF5
+
+  /* Temporary tables */
+  float *net_cooling_rate = NULL;
+  float *electron_abundance = NULL;
+  float *temperature = NULL;
+  float *he_net_cooling_rate = NULL;
+  float *he_electron_abundance = NULL;
+
+  /* Allocate arrays for reading in cooling tables.  */
+  if (posix_memalign((void **)&net_cooling_rate, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_cooling_rate * sizeof(float)) != 0)
+    error("Failed to allocate net_cooling_rate array");
+  if (posix_memalign((void **)&electron_abundance, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_electron_abundance * sizeof(float)) != 0)
+    error("Failed to allocate electron_abundance array");
+  if (posix_memalign((void **)&temperature, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_temperature * sizeof(float)) != 0)
+    error("Failed to allocate temperature array");
+  if (posix_memalign((void **)&he_net_cooling_rate, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_HpHe_heating * sizeof(float)) != 0)
+    error("Failed to allocate he_net_cooling_rate array");
+  if (posix_memalign((void **)&he_electron_abundance, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_HpHe_electron_abundance * sizeof(float)) != 0)
+    error("Failed to allocate he_electron_abundance array");
+
+  /* Decide which high redshift table to read. Indices set in cooling_update */
+  char filename[eagle_table_path_name_length + 22]; /* longest suffix: 21 + NUL */
+  if (photodis) {
+    sprintf(filename, "%sz_photodis.hdf5", cooling->cooling_table_path);
+    message("Reading cooling table 'z_photodis.hdf5'");
+  } else {
+    sprintf(filename, "%sz_8.989nocompton.hdf5", cooling->cooling_table_path);
+    message("Reading cooling table 'z_8.989nocompton.hdf5'");
+  }
+
+  hid_t file_id = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+  if (file_id < 0) error("unable to open file %s", filename);
+
+  char set_name[64];
+
+  /* read in cooling rates due to metals */
+  for (int specs = 0; specs < eagle_cooling_N_metal; specs++) {
+
+    /* Read in the cooling rate for this metal */
+    sprintf(set_name, "/%s/Net_Cooling", eagle_tables_element_names[specs]);
+    hid_t dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+    herr_t status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
+                            H5P_DEFAULT, net_cooling_rate);
+    if (status < 0) error("error reading metal cooling rate table");
+    status = H5Dclose(dataset);
+    if (status < 0) error("error closing cooling dataset");
+
+    /* Transpose from order tables are stored in (temperature, nH)
+     * to (metal species, nH, temperature) where fastest
+     * varying index is on right. Tables contain cooling rates but we
+     * want rate of change of internal energy, hence minus sign. */
+    for (int j = 0; j < eagle_cooling_N_temperature; j++) {
+      for (int k = 0; k < eagle_cooling_N_density; k++) {
+
+        /* Index in the HDF5 table */
+        const int hdf5_index = row_major_index_2d(
+            j, k, eagle_cooling_N_temperature, eagle_cooling_N_density);
+
+        /* Index in the internal table */
+        const int internal_index = row_major_index_3d(
+            specs, k, j, eagle_cooling_N_metal, eagle_cooling_N_density,
+            eagle_cooling_N_temperature);
+
+        /* Change the sign and transpose */
+        cooling->table.metal_heating[internal_index] =
+            -net_cooling_rate[hdf5_index];
+      }
+    }
+  }
+
+  /* read in cooling rates due to H + He */
+  strcpy(set_name, "/Metal_free/Net_Cooling");
+  hid_t dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+  herr_t status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
+                          H5P_DEFAULT, he_net_cooling_rate);
+  if (status < 0) error("error reading metal free cooling rate table");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* read in Temperatures */
+  strcpy(set_name, "/Metal_free/Temperature/Temperature");
+  dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   temperature);
+  if (status < 0) error("error reading temperature table");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* read in H + He electron abundances */
+  strcpy(set_name, "/Metal_free/Electron_density_over_n_h");
+  dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   he_electron_abundance);
+  if (status < 0) error("error reading electron density table");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Transpose from order tables are stored in (helium fraction, temperature,
+   * nH) to (nH, helium fraction, temperature) where fastest
+   * varying index is on right. Tables contain cooling rates but we
+   * want rate of change of internal energy, hence minus sign. */
+  for (int i = 0; i < eagle_cooling_N_He_frac; i++) {
+    for (int j = 0; j < eagle_cooling_N_temperature; j++) {
+      for (int k = 0; k < eagle_cooling_N_density; k++) {
+
+        /* Index in the HDF5 table */
+        const int hdf5_index = row_major_index_3d(
+            i, j, k, eagle_cooling_N_He_frac, eagle_cooling_N_temperature,
+            eagle_cooling_N_density);
+
+        /* Index in the internal table */
+        const int internal_index = row_major_index_3d(
+            k, i, j, eagle_cooling_N_density, eagle_cooling_N_He_frac,
+            eagle_cooling_N_temperature);
+
+        /* Change the sign and transpose */
+        cooling->table.H_plus_He_heating[internal_index] =
+            -he_net_cooling_rate[hdf5_index];
+
+        /* Convert to log T and transpose */
+        cooling->table.temperature[internal_index] =
+            log10(temperature[hdf5_index]);
+
+        /* Just transpose */
+        cooling->table.H_plus_He_electron_abundance[internal_index] =
+            he_electron_abundance[hdf5_index];
+      }
+    }
+  }
+
+  /* read in electron densities due to metals */
+  strcpy(set_name, "/Solar/Electron_density_over_n_h");
+  dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+  status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                   electron_abundance);
+  if (status < 0) error("error reading solar electron density table");
+  status = H5Dclose(dataset);
+  if (status < 0) error("error closing cooling dataset");
+
+  /* Transpose from order tables are stored in (temperature, nH) to
+   * (nH, temperature) where fastest varying index is on right. */
+  for (int i = 0; i < eagle_cooling_N_temperature; i++) {
+    for (int j = 0; j < eagle_cooling_N_density; j++) {
+
+      /* Index in the HDF5 table */
+      const int hdf5_index = row_major_index_2d(
+          i, j, eagle_cooling_N_temperature, eagle_cooling_N_density);
+
+      /* Index in the internal table */
+      const int internal_index = row_major_index_2d(
+          j, i, eagle_cooling_N_density, eagle_cooling_N_temperature);
+
+      /* Just transpose */
+      cooling->table.electron_abundance[internal_index] =
+          electron_abundance[hdf5_index];
+    }
+  }
+
+  status = H5Fclose(file_id);
+  if (status < 0) error("error closing file");
+
+  free(net_cooling_rate);
+  free(electron_abundance);
+  free(temperature);
+  free(he_net_cooling_rate);
+  free(he_electron_abundance);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  message("done reading in redshift invariant table");
+#endif
+
+#else
+  error("Need HDF5 to read cooling tables");
+#endif
+}
+
+/**
+ * @brief Get redshift dependent table of cooling rates.
+ * Reads in table of cooling rates and electron abundances due to
+ * metals (depending on temperature, hydrogen number density), cooling rates and
+ * electron abundances due to hydrogen and helium (depending on temperature,
+ * hydrogen number density and helium fraction), and temperatures (depending on
+ * internal energy, hydrogen number density and helium fraction; note: this is
+ * distinct from table of temperatures read in ReadCoolingHeader, as that table
+ * is used to index the cooling, electron abundance tables, whereas this one is
+ * used to obtain temperature of particle)
+ *
+ * @param cooling #cooling_function_data structure
+ * @param low_z_index Index of the lowest redshift table to load.
+ * @param high_z_index Index of the highest redshift table to load.
+ */
+void get_cooling_table(struct cooling_function_data *restrict cooling,
+                       const int low_z_index, const int high_z_index) {
+
+#ifdef HAVE_HDF5
+
+  /* Temporary tables */
+  float *net_cooling_rate = NULL;
+  float *electron_abundance = NULL;
+  float *temperature = NULL;
+  float *he_net_cooling_rate = NULL;
+  float *he_electron_abundance = NULL;
+
+  /* Allocate arrays for reading in cooling tables.  */
+  if (posix_memalign((void **)&net_cooling_rate, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_cooling_rate * sizeof(float)) != 0)
+    error("Failed to allocate net_cooling_rate array");
+  if (posix_memalign((void **)&electron_abundance, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_electron_abundance * sizeof(float)) != 0)
+    error("Failed to allocate electron_abundance array");
+  if (posix_memalign((void **)&temperature, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_temperature * sizeof(float)) != 0)
+    error("Failed to allocate temperature array");
+  if (posix_memalign((void **)&he_net_cooling_rate, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_HpHe_heating * sizeof(float)) != 0)
+    error("Failed to allocate he_net_cooling_rate array");
+  if (posix_memalign((void **)&he_electron_abundance, SWIFT_STRUCT_ALIGNMENT,
+                     num_elements_HpHe_electron_abundance * sizeof(float)) != 0)
+    error("Failed to allocate he_electron_abundance array");
+
+  /* Read in tables, transpose so that values for indices which vary most are
+   * adjacent. Repeat for redshift above and redshift below current value.  */
+  for (int z_index = low_z_index; z_index <= high_z_index; z_index++) {
+
+    /* Index along redshift dimension for the subset of tables we read */
+    const int local_z_index = z_index - low_z_index;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (local_z_index >= eagle_cooling_N_loaded_redshifts)
+      error("Reading invalid number of tables along z axis.");
+#endif
+
+    /* Open table for this redshift index */
+    char fname[eagle_table_path_name_length + 14]; /* "z_%1.3f.hdf5" + NUL */
+    sprintf(fname, "%sz_%1.3f.hdf5", cooling->cooling_table_path,
+            cooling->Redshifts[z_index]);
+    message("Reading cooling table 'z_%1.3f.hdf5'",
+            cooling->Redshifts[z_index]);
+
+    hid_t file_id = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+    if (file_id < 0) error("unable to open file %s", fname);
+
+    char set_name[64];
+
+    /* read in cooling rates due to metals */
+    for (int specs = 0; specs < eagle_cooling_N_metal; specs++) {
+
+      sprintf(set_name, "/%s/Net_Cooling", eagle_tables_element_names[specs]);
+      hid_t dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+      herr_t status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
+                              H5P_DEFAULT, net_cooling_rate);
+      if (status < 0) error("error reading metal cooling rate table");
+      status = H5Dclose(dataset);
+      if (status < 0) error("error closing cooling dataset");
+
+      /* Transpose from order tables are stored in (temperature, nH)
+       * to (metal species, redshift, nH, temperature) where fastest
+       * varying index is on right. Tables contain cooling rates but we
+       * want rate of change of internal energy, hence minus sign. */
+      for (int i = 0; i < eagle_cooling_N_density; i++) {
+        for (int j = 0; j < eagle_cooling_N_temperature; j++) {
+
+          /* Index in the HDF5 table */
+          const int hdf5_index = row_major_index_2d(
+              j, i, eagle_cooling_N_temperature, eagle_cooling_N_density);
+
+          /* Index in the internal table */
+          const int internal_index = row_major_index_4d(
+              specs, local_z_index, i, j, eagle_cooling_N_metal,
+              eagle_cooling_N_loaded_redshifts, eagle_cooling_N_density,
+              eagle_cooling_N_temperature);
+
+          /* Change the sign and transpose */
+          cooling->table.metal_heating[internal_index] =
+              -net_cooling_rate[hdf5_index];
+        }
+      }
+    }
+
+    /* read in cooling rates due to H + He */
+    strcpy(set_name, "/Metal_free/Net_Cooling");
+    hid_t dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+    herr_t status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL,
+                            H5P_DEFAULT, he_net_cooling_rate);
+    if (status < 0) error("error reading metal free cooling rate table");
+    status = H5Dclose(dataset);
+    if (status < 0) error("error closing cooling dataset");
+
+    /* read in Temperature */
+    strcpy(set_name, "/Metal_free/Temperature/Temperature");
+    dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+    status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                     temperature);
+    if (status < 0) error("error reading temperature table");
+    status = H5Dclose(dataset);
+    if (status < 0) error("error closing cooling dataset");
+
+    /* Read in H + He electron abundance */
+    strcpy(set_name, "/Metal_free/Electron_density_over_n_h");
+    dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+    status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                     he_electron_abundance);
+    if (status < 0) error("error reading electron density table");
+    status = H5Dclose(dataset);
+    if (status < 0) error("error closing cooling dataset");
+
+    /* Transpose from order tables are stored in (helium fraction, temperature,
+     * nH) to (redshift, nH, helium fraction, temperature) where fastest
+     * varying index is on right. */
+    for (int i = 0; i < eagle_cooling_N_He_frac; i++) {
+      for (int j = 0; j < eagle_cooling_N_temperature; j++) {
+        for (int k = 0; k < eagle_cooling_N_density; k++) {
+
+          /* Index in the HDF5 table */
+          const int hdf5_index = row_major_index_3d(
+              i, j, k, eagle_cooling_N_He_frac, eagle_cooling_N_temperature,
+              eagle_cooling_N_density);
+
+          /* Index in the internal table */
+          const int internal_index = row_major_index_4d(
+              local_z_index, k, i, j, eagle_cooling_N_loaded_redshifts,
+              eagle_cooling_N_density, eagle_cooling_N_He_frac,
+              eagle_cooling_N_temperature);
+
+          /* Change the sign and transpose */
+          cooling->table.H_plus_He_heating[internal_index] =
+              -he_net_cooling_rate[hdf5_index];
+
+          /* Convert to log T and transpose */
+          cooling->table.temperature[internal_index] =
+              log10(temperature[hdf5_index]);
+
+          /* Just transpose */
+          cooling->table.H_plus_He_electron_abundance[internal_index] =
+              he_electron_abundance[hdf5_index];
+        }
+      }
+    }
+
+    /* read in electron densities due to metals */
+    strcpy(set_name, "/Solar/Electron_density_over_n_h");
+    dataset = H5Dopen(file_id, set_name, H5P_DEFAULT);
+    status = H5Dread(dataset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+                     electron_abundance);
+    if (status < 0) error("error reading solar electron density table");
+    status = H5Dclose(dataset);
+    if (status < 0) error("error closing cooling dataset");
+
+    /* Transpose from order tables are stored in (temperature, nH) to
+     * (redshift, nH, temperature) where fastest varying index is on right. */
+    for (int i = 0; i < eagle_cooling_N_temperature; i++) {
+      for (int j = 0; j < eagle_cooling_N_density; j++) {
+
+        /* Index in the HDF5 table */
+        const int hdf5_index = row_major_index_2d(
+            i, j, eagle_cooling_N_temperature, eagle_cooling_N_density);
+
+        /* Index in the internal table */
+        const int internal_index = row_major_index_3d(
+            local_z_index, j, i, eagle_cooling_N_loaded_redshifts,
+            eagle_cooling_N_density, eagle_cooling_N_temperature);
+
+        /* Just transpose */
+        cooling->table.electron_abundance[internal_index] =
+            electron_abundance[hdf5_index];
+      }
+    }
+
+    status = H5Fclose(file_id);
+    if (status < 0) error("error closing file");
+  }
+
+  free(net_cooling_rate);
+  free(electron_abundance);
+  free(temperature);
+  free(he_net_cooling_rate);
+  free(he_electron_abundance);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  message("Done reading in general cooling table");
+#endif
+
+#else
+  error("Need HDF5 to read cooling tables");
+#endif
+}
diff --git a/src/cooling/EAGLE/cooling_tables.h b/src/cooling/EAGLE/cooling_tables.h
new file mode 100644
index 0000000000000000000000000000000000000000..20abd6f423c9c5aadbb30b9bb8096e860b050234
--- /dev/null
+++ b/src/cooling/EAGLE/cooling_tables.h
@@ -0,0 +1,65 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_COOL_TABLES_H
+#define SWIFT_EAGLE_COOL_TABLES_H
+
+/**
+ * @file src/cooling/EAGLE/cooling_tables.h
+ * @brief EAGLE cooling tables
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+#include "cooling_struct.h"
+
+/*! Number of different bins along the redshift axis of the tables */
+#define eagle_cooling_N_redshifts 49
+
+/*! Number of redshift bins loaded at any given point in time */
+#define eagle_cooling_N_loaded_redshifts 2
+
+/*! Number of different bins along the temperature axis of the tables */
+#define eagle_cooling_N_temperature 176
+
+/*! Number of different bins along the density axis of the tables */
+#define eagle_cooling_N_density 41
+
+/*! Number of different bins along the metal axis of the tables */
+#define eagle_cooling_N_metal 9
+
+/*! Number of different bins along the helium fraction axis of the tables */
+#define eagle_cooling_N_He_frac 7
+
+/*! Number of different bins along the abundances axis of the tables */
+#define eagle_cooling_N_abundances 11
+
+void get_cooling_redshifts(struct cooling_function_data *cooling);
+
+void read_cooling_header(const char *fname,
+                         struct cooling_function_data *cooling);
+
+void allocate_cooling_tables(struct cooling_function_data *restrict cooling);
+
+void get_redshift_invariant_table(
+    struct cooling_function_data *restrict cooling, const int photodis);
+void get_cooling_table(struct cooling_function_data *restrict cooling,
+                       const int low_z_index, const int high_z_index);
+
+#endif
diff --git a/src/cooling/EAGLE/interpolate.h b/src/cooling/EAGLE/interpolate.h
new file mode 100644
index 0000000000000000000000000000000000000000..78955e7fd7409d3501d16ae50d4a2248cb1cff0b
--- /dev/null
+++ b/src/cooling/EAGLE/interpolate.h
@@ -0,0 +1,499 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_INTERPOL_EAGLE_H
+#define SWIFT_INTERPOL_EAGLE_H
+
+/**
+ * @file src/cooling/EAGLE/interpolate.h
+ * @brief Interpolation functions for EAGLE tables
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes. */
+#include "align.h"
+#include "error.h"
+#include "inline.h"
+
+/**
+ * @brief Returns the 1d index of element with 2d indices x,y
+ * from a flattened 2d array in row major order (y is the fastest-varying index)
+ *
+ * @param x, y Indices of element of interest (bounds-checked in debug builds)
+ * @param Nx, Ny Sizes of array dimensions
+ */
+__attribute__((always_inline)) INLINE int row_major_index_2d(const int x,
+                                                             const int y,
+                                                             const int Nx,
+                                                             const int Ny) {
+#ifdef SWIFT_DEBUG_CHECKS
+  assert(x < Nx);
+  assert(y < Ny);
+#endif
+  return x * Ny + y;
+}
+
+/**
+ * @brief Returns the 1d index of element with 3d indices x,y,z
+ * from a flattened 3d array in row major order (z is the fastest-varying index)
+ *
+ * @param x, y, z Indices of element of interest (bounds-checked in debug builds)
+ * @param Nx, Ny, Nz Sizes of array dimensions
+ */
+__attribute__((always_inline)) INLINE int row_major_index_3d(
+    const int x, const int y, const int z, const int Nx, const int Ny,
+    const int Nz) {
+#ifdef SWIFT_DEBUG_CHECKS
+  assert(x < Nx);
+  assert(y < Ny);
+  assert(z < Nz);
+#endif
+  return x * Ny * Nz + y * Nz + z;
+}
+
+/**
+ * @brief Returns the 1d index of element with 4d indices x,y,z,w
+ * from a flattened 4d array in row major order (w is the fastest-varying index)
+ *
+ * @param x, y, z, w Indices of element of interest (checked in debug builds)
+ * @param Nx, Ny, Nz, Nw Sizes of array dimensions
+ */
+__attribute__((always_inline)) INLINE int row_major_index_4d(
+    const int x, const int y, const int z, const int w, const int Nx,
+    const int Ny, const int Nz, const int Nw) {
+#ifdef SWIFT_DEBUG_CHECKS
+  assert(x < Nx);
+  assert(y < Ny);
+  assert(z < Nz);
+  assert(w < Nw);
+#endif
+  return x * Ny * Nz * Nw + y * Nz * Nw + z * Nw + w;
+}
+
+/**
+ * @brief Finds the index of a value in a table and compute delta to nearest
+ * element.
+ *
+ * This function assumes the table is monotonically increasing with a constant
+ * difference between adjacent values.
+ *
+ * The returned difference is expressed in units of the table separation. This
+ * means dx = (x - table[i]) / (table[i+1] - table[i]). It is always between
+ * 0 and 1.
+ *
+ * We use a small epsilon of 1e-4 to avoid out-of-range accesses due to
+ * rounding errors.
+ *
+ * @param table The table to search in.
+ * @param size The number of elements in the table.
+ * @param x The value to search for.
+ * @param i (return) Index such that table[i] <= x < table[i+1], clamped to
+ * @param *dx (return) Distance from table[i] in units of the grid spacing
+ */
+__attribute__((always_inline)) INLINE void get_index_1d(
+    const float *restrict table, const int size, const float x, int *i,
+    float *restrict dx) {
+
+  /* Small epsilon to avoid rounding issues leading to out-of-bound
+   * access when using the indices later to read data from the tables. */
+  const float epsilon = 1e-4f;
+
+  /* Indicate that the whole array is aligned on boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Distance between elements in the array */
+  const float delta = (size - 1) / (table[size - 1] - table[0]);
+
+  if (x < table[0] + epsilon) {
+    /* We are below the first element */
+    *i = 0;
+    *dx = 0.f;
+  } else if (x < table[size - 1] - epsilon) {
+    /* Normal case */
+    *i = (x - table[0]) * delta;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (*i > size || *i < 0) {
+      error(
+          "trying to get index for value outside table range. Table size: %d, "
+          "calculated index: %d, value: %.5e, table[0]: %.5e, grid size: %.5e",
+          size, *i, x, table[0], delta);
+    }
+#endif
+
+    *dx = (x - table[*i]) * delta;
+  } else {
+    /* We are after the last element */
+    *i = size - 2;
+    *dx = 1.f;
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (*dx < -0.001f || *dx > 1.001f) error("Invalid distance found dx=%e", *dx);
+#endif
+}
+
+/**
+ * @brief Interpolate a flattened 2D table at a given position.
+ *
+ * This function uses linear interpolation along each axis. It also
+ * assumes that the table is aligned on SWIFT_STRUCT_ALIGNMENT.
+ *
+ * @param table The 2D table to interpolate.
+ * @param xi, yi Indices of the lower corner of the interpolation cell.
+ * @param Nx, Ny Sizes of array dimensions.
+ * @param dx, dy Distance between the point and the index in units of
+ * the grid spacing.
+ */
+__attribute__((always_inline)) INLINE float interpolation_2d(
+    const float *table, const int xi, const int yi, const float dx,
+    const float dy, const int Nx, const int Ny) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (dx < -0.001f || dx > 1.001f) error("Invalid dx=%e", dx);
+  if (dy < -0.001f || dy > 1.001f) error("Invalid dy=%e", dy);
+#endif
+
+  const float tx = 1.f - dx;
+  const float ty = 1.f - dy;
+
+  /* Indicate that the whole array is aligned on boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Linear interpolation along each axis. We read the table 2^2=4 times */
+  float result = tx * ty * table[row_major_index_2d(xi + 0, yi + 0, Nx, Ny)];
+
+  result += tx * dy * table[row_major_index_2d(xi + 0, yi + 1, Nx, Ny)];
+  result += dx * ty * table[row_major_index_2d(xi + 1, yi + 0, Nx, Ny)];
+
+  result += dx * dy * table[row_major_index_2d(xi + 1, yi + 1, Nx, Ny)];
+
+  return result;
+}
+
+/**
+ * @brief Interpolate a flattened 3D table at a given position.
+ *
+ * This function uses linear interpolation along each axis. It also
+ * assumes that the table is aligned on SWIFT_STRUCT_ALIGNMENT.
+ *
+ * @param table The 3D table to interpolate.
+ * @param xi, yi, zi Indices of the lower corner of the interpolation cell.
+ * @param Nx, Ny, Nz Sizes of array dimensions.
+ * @param dx, dy, dz Distance between the point and the index in units of
+ * the grid spacing.
+ */
+__attribute__((always_inline)) INLINE float interpolation_3d(
+    const float *table, const int xi, const int yi, const int zi,
+    const float dx, const float dy, const float dz, const int Nx, const int Ny,
+    const int Nz) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (dx < -0.001f || dx > 1.001f) error("Invalid dx=%e", dx);
+  if (dy < -0.001f || dy > 1.001f) error("Invalid dy=%e", dy);
+  if (dz < -0.001f || dz > 1.001f) error("Invalid dz=%e", dz);
+#endif
+
+  const float tx = 1.f - dx;
+  const float ty = 1.f - dy;
+  const float tz = 1.f - dz;
+
+  /* Indicate that the whole array is aligned on page boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Linear interpolation along each axis. We read the table 2^3=8 times */
+  float result = tx * ty * tz *
+                 table[row_major_index_3d(xi + 0, yi + 0, zi + 0, Nx, Ny, Nz)];
+
+  result += tx * ty * dz *
+            table[row_major_index_3d(xi + 0, yi + 0, zi + 1, Nx, Ny, Nz)];
+  result += tx * dy * tz *
+            table[row_major_index_3d(xi + 0, yi + 1, zi + 0, Nx, Ny, Nz)];
+  result += dx * ty * tz *
+            table[row_major_index_3d(xi + 1, yi + 0, zi + 0, Nx, Ny, Nz)];
+
+  result += tx * dy * dz *
+            table[row_major_index_3d(xi + 0, yi + 1, zi + 1, Nx, Ny, Nz)];
+  result += dx * ty * dz *
+            table[row_major_index_3d(xi + 1, yi + 0, zi + 1, Nx, Ny, Nz)];
+  result += dx * dy * tz *
+            table[row_major_index_3d(xi + 1, yi + 1, zi + 0, Nx, Ny, Nz)];
+
+  result += dx * dy * dz *
+            table[row_major_index_3d(xi + 1, yi + 1, zi + 1, Nx, Ny, Nz)];
+
+  return result;
+}
+
+/**
+ * @brief Interpolate a flattened 3D table at a given position but avoid the
+ * x-dimension.
+ *
+ * This function uses linear interpolation along each axis.
+ * We look at the xi coordinate but do not interpolate around it. We just
+ * interpolate the remaining 2 dimensions.
+ * The function also assumes that the table is aligned on
+ * SWIFT_STRUCT_ALIGNMENT.
+ *
+ * @param table The 3D table to interpolate.
+ * @param xi, yi, zi Indices of element of interest (xi used as-is, no x interp)
+ * @param Nx, Ny, Nz Sizes of array dimensions.
+ * @param dx, dy, dz Distance between the point and the index in units of
+ * the grid spacing (dx must be 0; enforced in debug builds).
+ */
+__attribute__((always_inline)) INLINE float interpolation_3d_no_x(
+    const float *table, const int xi, const int yi, const int zi,
+    const float dx, const float dy, const float dz, const int Nx, const int Ny,
+    const int Nz) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (dx != 0.f) error("Attempting to interpolate along x!");
+  if (dy < -0.001f || dy > 1.001f) error("Invalid dy=%e", dy);
+  if (dz < -0.001f || dz > 1.001f) error("Invalid dz=%e", dz);
+#endif
+
+  const float tx = 1.f;
+  const float ty = 1.f - dy;
+  const float tz = 1.f - dz;
+
+  /* Indicate that the whole array is aligned on page boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Linear interpolation along each axis. We read the table 2^2=4 times */
+  /* Note that we intentionally kept the table access along the axis where */
+  /* we do not interpolate as comments in the code to allow readers to */
+  /* understand what is going on. */
+  float result = tx * ty * tz *
+                 table[row_major_index_3d(xi + 0, yi + 0, zi + 0, Nx, Ny, Nz)];
+
+  result += tx * ty * dz *
+            table[row_major_index_3d(xi + 0, yi + 0, zi + 1, Nx, Ny, Nz)];
+  result += tx * dy * tz *
+            table[row_major_index_3d(xi + 0, yi + 1, zi + 0, Nx, Ny, Nz)];
+  /* result += dx * ty * tz * */
+  /*           table[row_major_index_3d(xi + 1, yi + 0, zi + 0, Nx, Ny, Nz)]; */
+
+  result += tx * dy * dz *
+            table[row_major_index_3d(xi + 0, yi + 1, zi + 1, Nx, Ny, Nz)];
+  /* result += dx * ty * dz * */
+  /*           table[row_major_index_3d(xi + 1, yi + 0, zi + 1, Nx, Ny, Nz)]; */
+  /* result += dx * dy * tz * */
+  /*           table[row_major_index_3d(xi + 1, yi + 1, zi + 0, Nx, Ny, Nz)]; */
+
+  /* result += dx * dy * dz * */
+  /*           table[row_major_index_3d(xi + 1, yi + 1, zi + 1, Nx, Ny, Nz)]; */
+
+  return result;
+}
+
+/**
+ * @brief Interpolate a flattened 4D table at a given position.
+ *
+ * This function uses linear interpolation along each axis. It also
+ * assumes that the table is aligned on SWIFT_STRUCT_ALIGNMENT.
+ *
+ * @param table The 4D table to interpolate.
+ * @param xi, yi, zi, wi Indices of element of interest.
+ * @param Nx, Ny, Nz, Nw Sizes of array dimensions.
+ * @param dx, dy, dz, dw Distance between the point and the index in units of
+ * the grid spacing.
+ */
+__attribute__((always_inline)) INLINE float interpolation_4d(
+    const float *table, const int xi, const int yi, const int zi, const int wi,
+    const float dx, const float dy, const float dz, const float dw,
+    const int Nx, const int Ny, const int Nz, const int Nw) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (dx < -0.001f || dx > 1.001f) error("Invalid dx=%e", dx);
+  if (dy < -0.001f || dy > 1.001f) error("Invalid dy=%e", dy);
+  if (dz < -0.001f || dz > 1.001f) error("Invalid dz=%e", dz);
+  if (dw < -0.001f || dw > 1.001f) error("Invalid dw=%e", dw);
+#endif
+
+  const float tx = 1.f - dx;
+  const float ty = 1.f - dy;
+  const float tz = 1.f - dz;
+  const float tw = 1.f - dw;
+
+  /* Indicate that the whole array is aligned on boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Linear interpolation along each axis. We read the table 2^4=16 times */
+  float result =
+      tx * ty * tz * tw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+
+  result +=
+      tx * ty * tz * dw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * ty * dz * tw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * tz * tw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * ty * tz * tw *
+      table[row_major_index_4d(xi + 1, yi + 0, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+
+  result +=
+      tx * ty * dz * dw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * tz * dw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * ty * tz * dw *
+      table[row_major_index_4d(xi + 1, yi + 0, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * dz * tw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * ty * dz * tw *
+      table[row_major_index_4d(xi + 1, yi + 0, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * dy * tz * tw *
+      table[row_major_index_4d(xi + 1, yi + 1, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+
+  result +=
+      dx * dy * dz * tw *
+      table[row_major_index_4d(xi + 1, yi + 1, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * dy * tz * dw *
+      table[row_major_index_4d(xi + 1, yi + 1, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      dx * ty * dz * dw *
+      table[row_major_index_4d(xi + 1, yi + 0, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * dz * dw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+
+  result +=
+      dx * dy * dz * dw *
+      table[row_major_index_4d(xi + 1, yi + 1, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+
+  return result;
+}
+
+/**
+ * @brief Interpolate a flattened 4D table at a given position but avoid the
+ * x-dimension.
+ *
+ * This function uses linear interpolation along each axis.
+ * We look at the xi coordinate but do not interpolate around it. We just
+ * interpolate the remaining 3 dimensions.
+ * The function also assumes that the table is aligned on
+ * SWIFT_STRUCT_ALIGNMENT.
+ *
+ * @param table The 4D table to interpolate.
+ * @param xi, yi, zi, wi Indices of element of interest.
+ * @param Nx, Ny, Nz, Nw Sizes of array dimensions.
+ * @param dx, dy, dz, dw Distance between the point and the index in units of
+ * the grid spacing.
+ */
+__attribute__((always_inline)) INLINE float interpolation_4d_no_x(
+    const float *table, const int xi, const int yi, const int zi, const int wi,
+    const float dx, const float dy, const float dz, const float dw,
+    const int Nx, const int Ny, const int Nz, const int Nw) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (dx != 0.f) error("Attempting to interpolate along x!");
+  if (dy < -0.001f || dy > 1.001f) error("Invalid dy=%e", dy);
+  if (dz < -0.001f || dz > 1.001f) error("Invalid dz=%e", dz);
+  if (dw < -0.001f || dw > 1.001f) error("Invalid dw=%e", dw);
+#endif
+
+  const float tx = 1.f;
+  const float ty = 1.f - dy;
+  const float tz = 1.f - dz;
+  const float tw = 1.f - dw;
+
+  /* Indicate that the whole array is aligned on boundaries */
+  swift_align_information(float, table, SWIFT_STRUCT_ALIGNMENT);
+
+  /* Linear interpolation along each axis. We read the table 2^3=8 times */
+  /* Note that we intentionally kept the table access along the axis where */
+  /* we do not interpolate as comments in the code to allow readers to */
+  /* understand what is going on. */
+  float result =
+      tx * ty * tz * tw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+
+  result +=
+      tx * ty * tz * dw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * ty * dz * tw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * tz * tw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 0, wi + 0, Nx, Ny, Nz, Nw)];
+  /* result += */
+  /*     dx * ty * tz * tw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 0, zi + 0, wi + 0, Nx, Ny, Nz,
+   * Nw)]; */
+
+  result +=
+      tx * ty * dz * dw *
+      table[row_major_index_4d(xi + 0, yi + 0, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+  result +=
+      tx * dy * tz * dw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 0, wi + 1, Nx, Ny, Nz, Nw)];
+  /* result += */
+  /*     dx * ty * tz * dw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 0, zi + 0, wi + 1, Nx, Ny, Nz,
+   * Nw)]; */
+  result +=
+      tx * dy * dz * tw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 1, wi + 0, Nx, Ny, Nz, Nw)];
+  /* result += */
+  /*     dx * ty * dz * tw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 0, zi + 1, wi + 0, Nx, Ny, Nz,
+   * Nw)]; */
+  /* result += */
+  /*     dx * dy * tz * tw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 1, zi + 0, wi + 0, Nx, Ny, Nz, */
+  /* Nw)]; */
+
+  /* result += */
+  /*     dx * dy * dz * tw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 1, zi + 1, wi + 0, Nx, Ny, Nz, */
+  /* Nw)]; */
+  /* result += */
+  /*     dx * dy * tz * dw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 1, zi + 0, wi + 1, Nx, Ny, Nz, */
+  /* Nw)]; */
+  /* result += */
+  /*     dx * ty * dz * dw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 0, zi + 1, wi + 1, Nx, Ny, Nz,
+   * Nw)]; */
+  result +=
+      tx * dy * dz * dw *
+      table[row_major_index_4d(xi + 0, yi + 1, zi + 1, wi + 1, Nx, Ny, Nz, Nw)];
+
+  /* result += */
+  /*     dx * dy * dz * dw * */
+  /*     table[row_major_index_4d(xi + 1, yi + 1, zi + 1, wi + 1, Nx, Ny, Nz, */
+  /* Nw)]; */
+
+  return result;
+}
+
+#endif
diff --git a/src/cooling/const_du/cooling.h b/src/cooling/const_du/cooling.h
index b6fea7eea7b0fb208c4bffece425ec836d5df0c0..15eecc43093e9cc571d166aa49392e7dfea60c11 100644
--- a/src/cooling/const_du/cooling.h
+++ b/src/cooling/const_du/cooling.h
@@ -25,26 +25,41 @@
  * @file src/cooling/const_du/cooling.h
  * @brief Routines related to the "constant cooling" cooling function.
  *
- * This is the simplest possible cooling function. A constant cooling rate with
- * a minimal energy floor is applied. Should be used as a template for more
- * realistic functions.
+ * This is the simplest possible cooling function. A constant cooling rate
+ * (du/dt) with a minimal energy floor is applied. Should be used as a template
+ * for more realistic functions.
+ *
+ * This cooling model does NOT include cosmological terms and will hence yield
+ * an incorrect answer when running in co-moving coordinates.
  */
 
 /* Config parameters. */
 #include "../config.h"
 
 /* Some standard headers. */
+#include <float.h>
 #include <math.h>
 
 /* Local includes. */
-#include "const.h"
-#include "error.h"
+#include "entropy_floor.h"
 #include "hydro.h"
 #include "parser.h"
 #include "part.h"
 #include "physical_constants.h"
 #include "units.h"
 
+/**
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift.
+ *
+ * @param cosmo The current cosmological model.
+ * @param cooling The #cooling_function_data used in the run.
+ */
+INLINE static void cooling_update(const struct cosmology* cosmo,
+                                  struct cooling_function_data* cooling) {
+  // Add content if required.
+}
+
 /**
  * @brief Apply the cooling function to a particle.
  *
@@ -54,26 +69,32 @@
  * @param phys_const The physical constants in internal units.
  * @param us The internal system of units.
  * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param floor_props Properties of the entropy floor.
  * @param cooling The #cooling_function_data used in the run.
  * @param p Pointer to the particle data.
  * @param xp Pointer to the extended particle data.
  * @param dt The time-step of this particle.
+ * @param dt_therm The time-step operator used for thermal quantities.
  */
 __attribute__((always_inline)) INLINE static void cooling_cool_part(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
+    const struct hydro_props* hydro_props,
+    const struct entropy_floor_properties* floor_props,
     const struct cooling_function_data* restrict cooling,
-    struct part* restrict p, struct xpart* restrict xp, float dt) {
+    struct part* restrict p, struct xpart* restrict xp, const float dt,
+    const float dt_therm) {
 
   /* Internal energy floor */
   const float u_floor = cooling->min_energy;
 
   /* Get current internal energy */
-  const float u_old = hydro_get_physical_internal_energy(p, cosmo);
+  const float u_old = hydro_get_physical_internal_energy(p, xp, cosmo);
 
   /* Current du_dt */
-  const float hydro_du_dt = hydro_get_internal_energy_dt(p);
+  const float hydro_du_dt = hydro_get_physical_internal_energy_dt(p, cosmo);
 
   /* Get cooling function properties */
   float cooling_du_dt = -cooling->cooling_rate;
@@ -86,7 +107,7 @@ __attribute__((always_inline)) INLINE static void cooling_cool_part(
   }
 
   /* Update the internal energy time derivative */
-  hydro_set_internal_energy_dt(p, hydro_du_dt + cooling_du_dt);
+  hydro_set_physical_internal_energy_dt(p, cosmo, hydro_du_dt + cooling_du_dt);
 
   /* Store the radiated energy */
   xp->cooling_data.radiated_energy += -hydro_get_mass(p) * cooling_du_dt * dt;
@@ -103,16 +124,21 @@ __attribute__((always_inline)) INLINE static void cooling_cool_part(
  * @param phys_const The physical constants in internal units.
  * @param cosmo The current cosmological model.
  * @param us The internal system of units.
+ * @param hydro_props The properties of the hydro scheme.
  * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
  */
 __attribute__((always_inline)) INLINE static float cooling_timestep(
     const struct cooling_function_data* restrict cooling,
     const struct phys_const* restrict phys_const,
     const struct cosmology* restrict cosmo,
-    const struct unit_system* restrict us, const struct part* restrict p) {
+    const struct unit_system* restrict us,
+    const struct hydro_props* hydro_props, const struct part* restrict p,
+    const struct xpart* xp) {
 
   const float cooling_rate = cooling->cooling_rate;
-  const float internal_energy = hydro_get_physical_internal_energy(p, cosmo);
+  const float internal_energy =
+      hydro_get_physical_internal_energy(p, xp, cosmo);
   return cooling->cooling_tstep_mult * internal_energy / fabsf(cooling_rate);
 }
 
@@ -126,7 +152,10 @@ __attribute__((always_inline)) INLINE static float cooling_timestep(
  *
  * @param p Pointer to the particle data.
  * @param xp Pointer to the extended particle data.
+ * @param phys_const The physical constants in internal units.
  * @param cooling The properties of the cooling function.
+ * @param us The internal system of units.
+ * @param cosmo The current cosmological model.
  */
 __attribute__((always_inline)) INLINE static void cooling_first_init_part(
     const struct phys_const* restrict phys_const,
@@ -138,6 +167,49 @@ __attribute__((always_inline)) INLINE static void cooling_first_init_part(
   xp->cooling_data.radiated_energy = 0.f;
 }
 
+/**
+ * @brief Compute the temperature of a #part based on the cooling function.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+INLINE static float cooling_get_temperature(
+    const struct phys_const* restrict phys_const,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, const struct xpart* restrict xp) {
+
+  /* Physical constants */
+  const double m_H = phys_const->const_proton_mass;
+  const double k_B = phys_const->const_boltzmann_k;
+
+  /* Gas properties */
+  const double T_transition = hydro_props->hydrogen_ionization_temperature;
+  const double mu_neutral = hydro_props->mu_neutral;
+  const double mu_ionised = hydro_props->mu_ionised;
+
+  /* Particle temperature */
+  const double u = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Temperature over mean molecular weight */
+  const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B;
+
+  /* Are we above or below the HII -> HI transition? */
+  if (T_over_mu > (T_transition + 1.) / mu_ionised)
+    return T_over_mu * mu_ionised;
+  else if (T_over_mu < (T_transition - 1.) / mu_neutral)
+    return T_over_mu * mu_neutral;
+  else
+    return T_transition;
+}
+
 /**
  * @brief Returns the total radiated energy by this particle.
  *
@@ -176,6 +248,18 @@ static INLINE void cooling_init_backend(struct swift_params* parameter_file,
       parameter_file, "ConstCooling:cooling_tstep_mult");
 }
 
+/**
+ * @brief Restore cooling tables (if applicable) after
+ * restart
+ *
+ * Nothing to do here
+ *
+ * @param cooling the cooling_function_data structure
+ * @param cosmo cosmology structure
+ */
+static INLINE void cooling_restore_tables(struct cooling_function_data* cooling,
+                                          const struct cosmology* cosmo) {}
+
 /**
  * @brief Prints the properties of the cooling model to stdout.
  *
@@ -188,4 +272,42 @@ static INLINE void cooling_print_backend(
           cooling->cooling_rate, cooling->min_energy);
 }
 
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * @param cooling the cooling data structure.
+ */
+static INLINE void cooling_clean(struct cooling_function_data* cooling) {}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * Nothing to do beyond writing the structure from the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+static INLINE void cooling_struct_dump(
+    const struct cooling_function_data* cooling, FILE* stream) {
+  restart_write_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                       stream, "cooling", "cooling function");
+}
+
+/**
+ * @brief Restore a cooling struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Nothing to do beyond reading the structure from the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+static INLINE void cooling_struct_restore(struct cooling_function_data* cooling,
+                                          FILE* stream,
+                                          const struct cosmology* cosmo) {
+  restart_read_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                      stream, NULL, "cooling function");
+}
+
 #endif /* SWIFT_COOLING_CONST_DU_H */
diff --git a/src/cooling/const_du/cooling_io.h b/src/cooling/const_du/cooling_io.h
index 52a943aca86e51665fd1841d7bcb8a100b046ed8..a60aa5d282d0a244f206f74827f0c1979d3bcb75 100644
--- a/src/cooling/const_du/cooling_io.h
+++ b/src/cooling/const_du/cooling_io.h
@@ -21,38 +21,63 @@
 #ifndef SWIFT_COOLING_CONST_DU_IO_H
 #define SWIFT_COOLING_CONST_DU_IO_H
 
+/**
+ * @file src/cooling/const_du/cooling_io.h
+ * @brief i/o routines related to the "constant cooling" cooling function.
+ *
+ * This is the simplest possible cooling function. A constant cooling rate
+ * (du/dt) with a minimal energy floor is applied. Should be used as a template
+ * for more realistic functions.
+ */
+
 /* Config parameters. */
 #include "../config.h"
 
 /* Local includes */
+#include "cooling.h"
 #include "io_properties.h"
 
 #ifdef HAVE_HDF5
 
 /**
  * @brief Writes the current model of SPH to the file
- * @param h_grpsph The HDF5 group in which to write
+ * @param h_grp The HDF5 group in which to write
+ * @param cooling the parameters of the cooling function.
  */
 __attribute__((always_inline)) INLINE static void cooling_write_flavour(
-    hid_t h_grpsph) {
+    hid_t h_grp, const struct cooling_function_data* cooling) {
 
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Constant du/dt");
+  io_write_attribute_s(h_grp, "Cooling Model", "Constant du/dt");
 }
 #endif
 
+INLINE static void convert_part_T(const struct engine* e, const struct part* p,
+                                  const struct xpart* xp, float* ret) {
+
+  ret[0] = cooling_get_temperature(e->physical_constants, e->hydro_properties,
+                                   e->internal_units, e->cosmology,
+                                   e->cooling_func, p, xp);
+}
+
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
  * @param parts The particle array.
+ * @param xparts The extended particle data array.
  * @param list The list of i/o properties to write.
  * @param cooling The #cooling_function_data
  *
  * @return Returns the number of fields to write.
  */
 __attribute__((always_inline)) INLINE static int cooling_write_particles(
-    const struct xpart* xparts, struct io_props* list,
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
     const struct cooling_function_data* cooling) {
-  return 0;
+
+  list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1,
+                                              UNIT_CONV_TEMPERATURE, parts,
+                                              xparts, convert_part_T);
+
+  return 1;
 }
 
 #endif /* SWIFT_COOLING_CONST_DU_IO_H */
diff --git a/src/cooling/const_du/cooling_struct.h b/src/cooling/const_du/cooling_struct.h
index cc00b001cf6b576266de02dac885f87d089bd8e4..94db6b6542cacda6dbdc43c6db6b9c2cac7961d6 100644
--- a/src/cooling/const_du/cooling_struct.h
+++ b/src/cooling/const_du/cooling_struct.h
@@ -23,11 +23,11 @@
 
 /**
  * @file src/cooling/const_du/cooling_struct.h
- * @brief Structure related to the "constant cooling" cooling function.
+ * @brief Structures related to the "constant cooling" cooling function.
  *
- * This is the simplest possible cooling function. A constant cooling rate with
- * a minimal energy floor is applied. Should be used as a template for more
- * realistic functions.
+ * This is the simplest possible cooling function. A constant cooling rate
+ * (du/dt) with a minimal energy floor is applied. Should be used as a template
+ * for more realistic functions.
  */
 
 /**
diff --git a/src/cooling/const_lambda/cooling.h b/src/cooling/const_lambda/cooling.h
index f1a7abdbe14a39d98bbd01eb36ba870c8af0ee1a..974b055b1f8942f53b72e6ccf17389d8f85b666e 100644
--- a/src/cooling/const_lambda/cooling.h
+++ b/src/cooling/const_lambda/cooling.h
@@ -1,8 +1,6 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *                    Richard Bower (r.g.bower@durham.ac.uk)
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
  *                    Stefan Arridge  (stefan.arridge@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
@@ -19,10 +17,17 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-
 #ifndef SWIFT_COOLING_CONST_LAMBDA_H
 #define SWIFT_COOLING_CONST_LAMBDA_H
 
+/**
+ * @file src/cooling/const_lambda/cooling.h
+ * @brief Routines related to the "constant lambda" cooling function.
+ *
+ * This model assumes a constant cooling rate Lambda irrespective of redshift
+ * or density.
+ */
+
 /* Config parameters. */
 #include "../config.h"
 
@@ -32,6 +37,7 @@
 
 /* Local includes. */
 #include "const.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "hydro.h"
 #include "parser.h"
@@ -40,30 +46,51 @@
 #include "units.h"
 
 /**
- * @brief Calculates du/dt in code units for a particle.
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift.
  *
- * @param phys_const The physical constants in internal units.
- * @param us The internal system of units.
  * @param cosmo The current cosmological model.
  * @param cooling The #cooling_function_data used in the run.
- * @param p Pointer to the particle data..
  */
-__attribute__((always_inline)) INLINE static float cooling_rate(
-    const struct phys_const* const phys_const, const struct unit_system* us,
-    const struct cosmology* restrict cosmo,
+INLINE static void cooling_update(const struct cosmology* cosmo,
+                                  struct cooling_function_data* cooling) {
+  // Add content if required.
+}
+
+/**
+ * @brief Calculates du/dt in CGS units for a particle.
+ *
+ * The cooling rate is \f$\frac{du}{dt} = -\frac{\Lambda}{n_H^2}
+ * \frac{n_H^2}{\rho} \f$, where \f$ \frac{\Lambda}{n_H^2} \f$ is a constant in
+ * this model (lambda_nH2_cgs in #cooling_function_data).
+ * The returned value is in physical [erg * g^-1 * s^-1].
+ *
+ * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @return The change in energy per unit mass due to cooling for this particle
+ * in cgs units [erg * g^-1 * s^-1].
+ */
+__attribute__((always_inline)) INLINE static double cooling_rate_cgs(
+    const struct cosmology* cosmo, const struct hydro_props* hydro_props,
     const struct cooling_function_data* cooling, const struct part* p) {
 
-  /* Get particle density */
-  const float rho = hydro_get_physical_density(p, cosmo);
+  /* Get particle density [g * cm^-3] */
+  const double rho = hydro_get_physical_density(p, cosmo);
+  const double rho_cgs = rho * cooling->conv_factor_density_to_cgs;
+
+  /* Get Hydrogen mass fraction */
+  const double X_H = hydro_props->hydrogen_mass_fraction;
 
-  /* Get cooling function properties */
-  const float X_H = cooling->hydrogen_mass_abundance;
+  /* Hydrogen number density (X_H * rho / m_p) [cm^-3] */
+  const double n_H_cgs = X_H * rho_cgs * cooling->proton_mass_cgs_inv;
 
-  /* Calculate du_dt */
-  const float du_dt = -cooling->lambda *
-                      (X_H * rho / phys_const->const_proton_mass) *
-                      (X_H * rho / phys_const->const_proton_mass) / rho;
-  return du_dt;
+  /* Calculate du_dt ((Lambda / n_H^2) * n_H^2 / rho) */
+  const double du_dt_cgs =
+      -cooling->lambda_nH2_cgs * n_H_cgs * n_H_cgs / rho_cgs;
+
+  return du_dt_cgs;
 }
 
 /**
@@ -72,75 +99,135 @@ __attribute__((always_inline)) INLINE static float cooling_rate(
  * @param phys_const The physical constants in internal units.
  * @param us The internal system of units.
  * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param floor_props Properties of the entropy floor.
  * @param cooling The #cooling_function_data used in the run.
  * @param p Pointer to the particle data.
+ * @param xp Pointer to the particle' extended data.
  * @param dt The time-step of this particle.
+ * @param dt_therm The time-step operator used for thermal quantities.
  */
 __attribute__((always_inline)) INLINE static void cooling_cool_part(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
+    const struct hydro_props* hydro_props,
+    const struct entropy_floor_properties* floor_props,
     const struct cooling_function_data* restrict cooling,
-    struct part* restrict p, struct xpart* restrict xp, float dt) {
+    struct part* restrict p, struct xpart* restrict xp, const float dt,
+    const float dt_therm) {
 
-  /* Internal energy floor */
-  const float u_floor = cooling->min_energy;
+  /* Nothing to do here? */
+  if (dt == 0.) return;
 
   /* Current energy */
-  const float u_old = hydro_get_physical_internal_energy(p, cosmo);
+  const float u_old = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Current du_dt in physical coordinates (internal units) */
+  const float hydro_du_dt = hydro_get_physical_internal_energy_dt(p, cosmo);
 
-  /* Current du_dt */
-  const float hydro_du_dt = hydro_get_internal_energy_dt(p);
+  /* Calculate cooling du_dt (in cgs units) */
+  const double cooling_du_dt_cgs =
+      cooling_rate_cgs(cosmo, hydro_props, cooling, p);
 
-  /* Calculate cooling du_dt */
-  float cooling_du_dt = cooling_rate(phys_const, us, cosmo, cooling, p);
+  /* Convert to internal units */
+  float cooling_du_dt =
+      cooling_du_dt_cgs * cooling->conv_factor_energy_rate_from_cgs;
 
-  /* Integrate cooling equation to enforce energy floor */
-  /* Factor of 1.5 included since timestep could potentially double */
-  if (u_old + (hydro_du_dt + cooling_du_dt) * 1.5f * dt < u_floor) {
-    cooling_du_dt = -(u_old + 1.5f * dt * hydro_du_dt - u_floor) / (1.5f * dt);
+  /* Add cosmological term */
+  cooling_du_dt *= cosmo->a * cosmo->a;
+
+  float total_du_dt = hydro_du_dt + cooling_du_dt;
+
+  /* We now need to check that we are not going to go below any of the limits */
+
+  /* Limit imposed by the entropy floor */
+  const float A_floor = entropy_floor(p, cosmo, floor_props);
+  const float rho = hydro_get_physical_density(p, cosmo);
+  const float u_floor = gas_internal_energy_from_entropy(rho, A_floor);
+
+  /* Absolute minimum */
+  const float u_minimal = hydro_props->minimal_internal_energy;
+
+  /* Largest of both limits */
+  const float u_limit = max(u_minimal, u_floor);
+
+  /* First, check whether we may end up below the minimal energy after
+   * this step 1/2 kick + another 1/2 kick that could potentially be for
+   * a time-step twice as big. We hence check for 1.5 delta_t. */
+  if (u_old + total_du_dt * 1.5 * dt_therm < u_limit) {
+    total_du_dt = (u_limit - u_old) / (1.5f * dt_therm);
+  }
+
+  /* Second, check whether the energy used in the prediction could get negative.
+   * We need to check for the 1/2 dt kick followed by a full time-step drift
+   * that could potentially be for a time-step twice as big. We hence check
+   * for 2.5 delta_t but this time against 0 energy not the minimum */
+  if (u_old + total_du_dt * 2.5 * dt_therm < 0.) {
+    total_du_dt = -u_old / ((2.5f + 0.0001f) * dt_therm);
   }
 
   /* Update the internal energy time derivative */
-  hydro_set_internal_energy_dt(p, hydro_du_dt + cooling_du_dt);
+  hydro_set_physical_internal_energy_dt(p, cosmo, total_du_dt);
 
-  /* Store the radiated energy */
-  xp->cooling_data.radiated_energy += -hydro_get_mass(p) * cooling_du_dt * dt;
+  /* Store the radiated energy (assuming dt will not change) */
+  xp->cooling_data.radiated_energy +=
+      -hydro_get_mass(p) * (total_du_dt - hydro_du_dt) * dt_therm;
 }
 
 /**
- * @brief Computes the time-step due to cooling
+ * @brief Computes the time-step due to cooling for this particle.
+ *
+ * We compute a time-step \f$ \alpha \frac{u}{du/dt} \f$ in physical
+ * coordinates. \f$\alpha\f$ is a parameter of the cooling function.
  *
  * @param cooling The #cooling_function_data used in the run.
  * @param phys_const The physical constants in internal units.
  * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
  * @param us The internal system of units.
  * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended data of the particle.
  */
 __attribute__((always_inline)) INLINE static float cooling_timestep(
     const struct cooling_function_data* restrict cooling,
     const struct phys_const* restrict phys_const,
     const struct cosmology* restrict cosmo,
-    const struct unit_system* restrict us, const struct part* restrict p) {
+    const struct unit_system* restrict us,
+    const struct hydro_props* hydro_props, const struct part* restrict p,
+    const struct xpart* restrict xp) {
+
+  /* Start with the case where there is no limit */
+  if (cooling->cooling_tstep_mult == FLT_MAX) return FLT_MAX;
 
-  /* Get current internal energy */
-  const float u = hydro_get_physical_internal_energy(p, cosmo);
-  const float du_dt = cooling_rate(phys_const, us, cosmo, cooling, p);
+  /* Get current internal energy and cooling rate */
+  const float u = hydro_get_physical_internal_energy(p, xp, cosmo);
+  const double cooling_du_dt_cgs =
+      cooling_rate_cgs(cosmo, hydro_props, cooling, p);
 
-  /* If we are close to (or below) the energy floor, we ignore the condition */
-  if (u < 1.01f * cooling->min_energy)
+  /* Convert to internal units */
+  const float cooling_du_dt =
+      cooling_du_dt_cgs * cooling->conv_factor_energy_rate_from_cgs;
+
+  /* If we are close to (or below) the limit, we ignore the condition */
+  if (u < 1.01f * hydro_props->minimal_internal_energy || cooling_du_dt == 0.f)
     return FLT_MAX;
   else
-    return cooling->cooling_tstep_mult * u / fabsf(du_dt);
+    return cooling->cooling_tstep_mult * u / fabsf(cooling_du_dt);
 }
 
 /**
  * @brief Sets the cooling properties of the (x-)particles to a valid start
  * state.
  *
+ * Nothing to do here. Just set the radiated energy counter to 0.
+ *
+ * @param phys_const The physical constants in internal units.
+ * @param cooling The properties of the cooling function.
+ * @param us The internal system of units.
+ * @param cosmo The current cosmological model.
  * @param p Pointer to the particle data.
  * @param xp Pointer to the extended particle data.
- * @param cooling The properties of the cooling function.
  */
 __attribute__((always_inline)) INLINE static void cooling_first_init_part(
     const struct phys_const* restrict phys_const,
@@ -152,6 +239,49 @@ __attribute__((always_inline)) INLINE static void cooling_first_init_part(
   xp->cooling_data.radiated_energy = 0.f;
 }
 
+/**
+ * @brief Compute the temperature of a #part based on the cooling function.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+INLINE static float cooling_get_temperature(
+    const struct phys_const* restrict phys_const,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, const struct xpart* restrict xp) {
+
+  /* Physical constants */
+  const double m_H = phys_const->const_proton_mass;
+  const double k_B = phys_const->const_boltzmann_k;
+
+  /* Gas properties */
+  const double T_transition = hydro_props->hydrogen_ionization_temperature;
+  const double mu_neutral = hydro_props->mu_neutral;
+  const double mu_ionised = hydro_props->mu_ionised;
+
+  /* Particle temperature */
+  const double u = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Temperature over mean molecular weight */
+  const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B;
+
+  /* Are we above or below the HII -> HI transition? */
+  if (T_over_mu > (T_transition + 1.) / mu_ionised)
+    return T_over_mu * mu_ionised;
+  else if (T_over_mu < (T_transition - 1.) / mu_neutral)
+    return T_over_mu * mu_neutral;
+  else
+    return T_transition;
+}
+
 /**
  * @brief Returns the total radiated energy by this particle.
  *
@@ -176,32 +306,37 @@ static INLINE void cooling_init_backend(struct swift_params* parameter_file,
                                         const struct phys_const* phys_const,
                                         struct cooling_function_data* cooling) {
 
-  const double lambda_cgs =
-      parser_get_param_double(parameter_file, "LambdaCooling:lambda_cgs");
-  const float min_temperature = parser_get_param_double(
-      parameter_file, "LambdaCooling:minimum_temperature");
-  cooling->hydrogen_mass_abundance = parser_get_param_double(
-      parameter_file, "LambdaCooling:hydrogen_mass_abundance");
-  cooling->mean_molecular_weight = parser_get_param_double(
-      parameter_file, "LambdaCooling:mean_molecular_weight");
-  cooling->cooling_tstep_mult = parser_get_param_double(
-      parameter_file, "LambdaCooling:cooling_tstep_mult");
-
-  /* convert minimum temperature into minimum internal energy */
-  const float u_floor =
-      phys_const->const_boltzmann_k * min_temperature /
-      (hydro_gamma_minus_one * cooling->mean_molecular_weight *
-       phys_const->const_proton_mass);
-
-  cooling->min_energy = u_floor;
-
-  /* convert lambda to code units */
-  cooling->lambda = lambda_cgs *
-                    units_cgs_conversion_factor(us, UNIT_CONV_TIME) /
-                    (units_cgs_conversion_factor(us, UNIT_CONV_ENERGY) *
-                     units_cgs_conversion_factor(us, UNIT_CONV_VOLUME));
+  /* Read in the cooling parameters */
+  cooling->lambda_nH2_cgs =
+      parser_get_param_double(parameter_file, "LambdaCooling:lambda_nH2_cgs");
+  cooling->cooling_tstep_mult = parser_get_opt_param_float(
+      parameter_file, "LambdaCooling:cooling_tstep_mult", FLT_MAX);
+
+  /* Some useful conversion values */
+  cooling->conv_factor_density_to_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_DENSITY);
+  cooling->conv_factor_energy_rate_from_cgs =
+      units_cgs_conversion_factor(us, UNIT_CONV_TIME) /
+      units_cgs_conversion_factor(us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+
+  /* Useful constants */
+  cooling->proton_mass_cgs_inv =
+      1. / (phys_const->const_proton_mass *
+            units_cgs_conversion_factor(us, UNIT_CONV_MASS));
 }
 
+/**
+ * @brief Restore cooling tables (if applicable) after
+ * restart
+ *
+ * Nothing to do here
+ *
+ * @param cooling the cooling_function_data structure
+ * @param cosmo cosmology structure
+ */
+static INLINE void cooling_restore_tables(struct cooling_function_data* cooling,
+                                          const struct cosmology* cosmo) {}
+
 /**
  * @brief Prints the properties of the cooling model to stdout.
  *
@@ -211,11 +346,54 @@ static INLINE void cooling_print_backend(
     const struct cooling_function_data* cooling) {
 
   message(
-      "Cooling function is 'Constant lambda' with "
-      "(lambda,min_energy,hydrogen_mass_abundance,mean_molecular_weight) "
-      "=  (%g,%g,%g,%g)",
-      cooling->lambda, cooling->min_energy, cooling->hydrogen_mass_abundance,
-      cooling->mean_molecular_weight);
+      "Cooling function is 'Constant lambda' with Lambda/n_H^2=%g [erg * s^-1 "
+      "* "
+      "cm^3]",
+      cooling->lambda_nH2_cgs);
+
+  if (cooling->cooling_tstep_mult == FLT_MAX)
+    message("Cooling function time-step size is unlimited");
+  else
+    message("Cooling function time-step size limited to %f of u/(du/dt)",
+            cooling->cooling_tstep_mult);
+}
+
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * @param cooling the cooling data structure.
+ */
+static INLINE void cooling_clean(struct cooling_function_data* cooling) {}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * Nothing to do beyond writing the structure to the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+static INLINE void cooling_struct_dump(
+    const struct cooling_function_data* cooling, FILE* stream) {
+  restart_write_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                       stream, "cooling", "cooling function");
+}
+
+/**
+ * @brief Restore a cooling struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Nothing to do beyond reading the structure from the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+static INLINE void cooling_struct_restore(struct cooling_function_data* cooling,
+                                          FILE* stream,
+                                          const struct cosmology* cosmo) {
+  restart_read_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                      stream, NULL, "cooling function");
 }
 
 #endif /* SWIFT_COOLING_CONST_LAMBDA_H */
diff --git a/src/cooling/const_lambda/cooling_io.h b/src/cooling/const_lambda/cooling_io.h
index 89c9471a291a4a6a5740a8c6c816913cbc6316a0..9437f0f94db41725d6715cf349843bf079137305 100644
--- a/src/cooling/const_lambda/cooling_io.h
+++ b/src/cooling/const_lambda/cooling_io.h
@@ -1,8 +1,6 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *                    Richard Bower (r.g.bower@durham.ac.uk)
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
  *                    Stefan Arridge  (stefan.arridge@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
@@ -22,38 +20,63 @@
 #ifndef SWIFT_COOLING_CONST_LAMBDA_IO_H
 #define SWIFT_COOLING_CONST_LAMBDA_IO_H
 
+/**
+ * @file src/cooling/const_lambda/cooling_io.h
+ * @brief i/o routines related to the "constant lambda" cooling function.
+ *
+ * This model assumes a constant cooling rate Lambda irrespective of redshift
+ * or density.
+ */
+
 /* Config parameters. */
 #include "../config.h"
 
 /* Local includes */
+#include "cooling.h"
 #include "io_properties.h"
 
 #ifdef HAVE_HDF5
 
 /**
- * @brief Writes the current model of SPH to the file
- * @param h_grpsph The HDF5 group in which to write
+ * @brief Writes the current model of cooling to the file
+ * @param h_grp The HDF5 group in which to write
+ * @param cooling the parameters of the cooling function.
  */
 __attribute__((always_inline)) INLINE static void cooling_write_flavour(
-    hid_t h_grpsph) {
+    hid_t h_grp, const struct cooling_function_data* cooling) {
 
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Constant Lambda");
+  io_write_attribute_s(h_grp, "Cooling Model", "Constant Lambda");
+  io_write_attribute_d(h_grp, "Lambda/n_H^2 [cgs]", cooling->lambda_nH2_cgs);
 }
 #endif
 
+INLINE static void convert_part_T(const struct engine* e, const struct part* p,
+                                  const struct xpart* xp, float* ret) {
+
+  ret[0] = cooling_get_temperature(e->physical_constants, e->hydro_properties,
+                                   e->internal_units, e->cosmology,
+                                   e->cooling_func, p, xp);
+}
+
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
  * @param parts The particle array.
+ * @param xparts The extended particle array.
  * @param list The list of i/o properties to write.
  * @param cooling The #cooling_function_data
  *
  * @return Returns the number of fields to write.
  */
 __attribute__((always_inline)) INLINE static int cooling_write_particles(
-    const struct xpart* xparts, struct io_props* list,
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
     const struct cooling_function_data* cooling) {
-  return 0;
+
+  list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1,
+                                              UNIT_CONV_TEMPERATURE, parts,
+                                              xparts, convert_part_T);
+
+  return 1;
 }
 
 #endif /* SWIFT_COOLING_CONST_LAMBDA_IO_H */
diff --git a/src/cooling/const_lambda/cooling_struct.h b/src/cooling/const_lambda/cooling_struct.h
index 30d4e5e4af9c7bd139337709897d8111f88d2aa8..cc671a857887af90bda630e757af1b044b479e49 100644
--- a/src/cooling/const_lambda/cooling_struct.h
+++ b/src/cooling/const_lambda/cooling_struct.h
@@ -1,8 +1,6 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *                    Richard Bower (r.g.bower@durham.ac.uk)
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
  *                    Stefan Arridge  (stefan.arridge@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
@@ -19,26 +17,34 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-
 #ifndef SWIFT_COOLING_STRUCT_CONST_LAMBDA_H
 #define SWIFT_COOLING_STRUCT_CONST_LAMBDA_H
 
+/**
+ * @file src/cooling/const_lambda/cooling_struct.h
+ * @brief Structures related to the "constant lambda" cooling function.
+ *
+ * This model assumes a constant cooling rate Lambda irrespective of redshift
+ * or density.
+ */
+
 /**
  * @brief Properties of the cooling function.
  */
 struct cooling_function_data {
 
-  /*! Cooling rate in internal units */
-  double lambda;
+  /*! Cooling rate / nH^2 in physical cgs units [erg * s^-1 * cm^3] */
+  double lambda_nH2_cgs;
 
-  /*! Fraction of gas mass that is Hydrogen. Used to calculate n_H */
-  float hydrogen_mass_abundance;
+  /*! Conversion factor from internal units to cgs for density */
+  double conv_factor_density_to_cgs;
 
-  /*! 'mu', used to convert min_temperature to min_internal energy */
-  float mean_molecular_weight;
+  /*! Conversion factor to internal units from cgs for internal energy
+   * derivative */
+  double conv_factor_energy_rate_from_cgs;
 
-  /*! Minimally allowed internal energy of all the particles */
-  float min_energy;
+  /*! Inverse of the proton mass in cgs units [g^-1] */
+  double proton_mass_cgs_inv;
 
   /*! Constant multiplication factor for time-step criterion */
   float cooling_tstep_mult;
diff --git a/src/cooling/grackle/cooling.h b/src/cooling/grackle/cooling.h
index cb77b63294aacee425b917c1900eefd7ebfa5f34..2632de7e223306a6c6400e350f8cb62a62e58206 100644
--- a/src/cooling/grackle/cooling.h
+++ b/src/cooling/grackle/cooling.h
@@ -24,9 +24,6 @@
  * @brief Cooling using the GRACKLE 3.0 library.
  */
 
-/* Config parameters. */
-#include "../config.h"
-
 /* Some standard headers. */
 #include <float.h>
 #include <math.h>
@@ -36,6 +33,8 @@
 
 /* Local includes. */
 #include "chemistry.h"
+#include "cooling_io.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "hydro.h"
 #include "parser.h"
@@ -47,20 +46,35 @@
 #define GRACKLE_NPART 1
 #define GRACKLE_RANK 3
 
-/* prototypes */
+/* prototype */
 static gr_float cooling_time(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
     const struct cooling_function_data* restrict cooling,
-    const struct part* restrict p, struct xpart* restrict xp);
+    const struct part* restrict p, const struct xpart* restrict xp);
+static gr_float cooling_new_energy(
+    const struct phys_const* restrict phys_const,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, struct xpart* restrict xp, double dt);
 
-static double cooling_rate(const struct phys_const* restrict phys_const,
-                           const struct unit_system* restrict us,
-                           const struct cosmology* restrict cosmo,
-                           const struct cooling_function_data* restrict cooling,
-                           const struct part* restrict p,
-                           struct xpart* restrict xp, double dt);
+/**
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift.
+ *
+ * @param cosmo The current cosmological model.
+ * @param cooling The #cooling_function_data used in the run.
+ */
+INLINE static void cooling_update(const struct cosmology* cosmo,
+                                  struct cooling_function_data* cooling) {
+  /* set current time */
+  if (cooling->redshift == -1)
+    cooling->units.a_value = cosmo->a;
+  else
+    cooling->units.a_value = 1. / (1. + cooling->redshift);
+}
 
 /**
  * @brief Print the chemical network
@@ -163,7 +177,7 @@ __attribute__((always_inline)) INLINE static void cooling_compute_equilibrium(
   const double alpha = 0.01;
   double dt =
       fabs(cooling_time(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp));
-  cooling_rate(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp, dt);
+  cooling_new_energy(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp, dt);
   dt = alpha *
        fabs(cooling_time(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp));
 
@@ -179,7 +193,7 @@ __attribute__((always_inline)) INLINE static void cooling_compute_equilibrium(
     old = *xp;
 
     /* update chemistry */
-    cooling_rate(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp, dt);
+    cooling_new_energy(phys_const, us, cosmo, &cooling_tmp, &p_tmp, xp, dt);
   } while (step < max_step && !cooling_converged(xp, &old, conv_limit));
 
   if (step == max_step)
@@ -276,7 +290,8 @@ __attribute__((always_inline)) INLINE static void cooling_print_backend(
   message("\tLength       = %g", cooling->units.length_units);
   message("\tDensity      = %g", cooling->units.density_units);
   message("\tTime         = %g", cooling->units.time_units);
-  message("\tScale Factor = %g", cooling->units.a_units);
+  message("\tScale Factor = %g (units: %g)", cooling->units.a_value,
+          cooling->units.a_units);
 }
 
 /**
@@ -473,7 +488,8 @@ __attribute__((always_inline)) INLINE static void cooling_print_backend(
   cooling_copy_from_grackle3(data, p, xp, rho);
 
 /**
- * @brief Compute the cooling rate and update the particle chemistry data
+ * @brief Compute the energy of a particle after dt and update the particle
+ * chemistry data
  *
  * @param phys_const The physical constants in internal units.
  * @param us The internal system of units.
@@ -484,7 +500,7 @@ __attribute__((always_inline)) INLINE static void cooling_print_backend(
  *
  * @return du / dt
  */
-__attribute__((always_inline)) INLINE static gr_float cooling_rate(
+__attribute__((always_inline)) INLINE static gr_float cooling_new_energy(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
@@ -493,10 +509,6 @@ __attribute__((always_inline)) INLINE static gr_float cooling_rate(
 
   /* set current time */
   code_units units = cooling->units;
-  if (cooling->redshift == -1)
-    units.a_value = cosmo->a;
-  else
-    units.a_value = 1. / (1. + cooling->redshift);
 
   /* initialize data */
   grackle_field_data data;
@@ -515,7 +527,7 @@ __attribute__((always_inline)) INLINE static gr_float cooling_rate(
 
   /* general particle data */
   gr_float density = hydro_get_physical_density(p, cosmo);
-  const double energy_before = hydro_get_physical_internal_energy(p, cosmo);
+  const float energy_before = hydro_get_physical_internal_energy(p, xp, cosmo);
   gr_float energy = energy_before;
 
   /* initialize density */
@@ -534,28 +546,15 @@ __attribute__((always_inline)) INLINE static gr_float cooling_rate(
 
   /* solve chemistry */
   chemistry_data chemistry_grackle = cooling->chemistry;
-  chemistry_data_storage my_rates = grackle_rates;
-  int error_code = _solve_chemistry(
-      &chemistry_grackle, &my_rates, &units, dt, data.grid_dx, data.grid_rank,
-      data.grid_dimension, data.grid_start, data.grid_end, data.density,
-      data.internal_energy, data.x_velocity, data.y_velocity, data.z_velocity,
-      data.HI_density, data.HII_density, data.HM_density, data.HeI_density,
-      data.HeII_density, data.HeIII_density, data.H2I_density,
-      data.H2II_density, data.DI_density, data.DII_density, data.HDI_density,
-      data.e_density, data.metal_density, data.volumetric_heating_rate,
-      data.specific_heating_rate, data.RT_heating_rate,
-      data.RT_HI_ionization_rate, data.RT_HeI_ionization_rate,
-      data.RT_HeII_ionization_rate, data.RT_H2_dissociation_rate, NULL);
-  if (error_code == 0) error("Error in solve_chemistry.");
-  // if (solve_chemistry(&units, &data, dt) == 0) {
-  //  error("Error in solve_chemistry.");
-  //}
+  if (local_solve_chemistry(&chemistry_grackle, &grackle_rates, &units, &data,
+                            dt) == 0) {
+    error("Error in solve_chemistry.");
+  }
 
   /* copy from grackle data to particle */
   cooling_copy_from_grackle(data, p, xp, density);
 
-  /* compute rate */
-  return (energy - energy_before) / dt;
+  return energy;
 }
 
 /**
@@ -572,14 +571,10 @@ __attribute__((always_inline)) INLINE static gr_float cooling_time(
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
     const struct cooling_function_data* restrict cooling,
-    const struct part* restrict p, struct xpart* restrict xp) {
+    const struct part* restrict p, const struct xpart* restrict xp) {
 
   /* set current time */
   code_units units = cooling->units;
-  if (cooling->redshift == -1)
-    error("TODO time dependant redshift");
-  else
-    units.a_value = 1. / (1. + cooling->redshift);
 
   /* initialize data */
   grackle_field_data data;
@@ -596,7 +591,8 @@ __attribute__((always_inline)) INLINE static gr_float cooling_time(
   data.grid_end = grid_end;
 
   /* general particle data */
-  const gr_float energy_before = hydro_get_physical_internal_energy(p, cosmo);
+  const gr_float energy_before =
+      hydro_get_physical_internal_energy(p, xp, cosmo);
   gr_float density = hydro_get_physical_density(p, cosmo);
   gr_float energy = energy_before;
 
@@ -616,7 +612,10 @@ __attribute__((always_inline)) INLINE static gr_float cooling_time(
 
   /* Compute cooling time */
   gr_float cooling_time;
-  if (calculate_cooling_time(&units, &data, &cooling_time) == 0) {
+  chemistry_data chemistry_grackle = cooling->chemistry;
+  chemistry_data_storage chemistry_rates = grackle_rates;
+  if (local_calculate_cooling_time(&chemistry_grackle, &chemistry_rates, &units,
+                                   &data, &cooling_time) == 0) {
     error("Error in calculate_cooling_time.");
   }
 
@@ -636,27 +635,76 @@ __attribute__((always_inline)) INLINE static gr_float cooling_time(
  * @param cooling The #cooling_function_data used in the run.
  * @param p Pointer to the particle data.
  * @param dt The time-step of this particle.
+ * @param hydro_props the hydro_props struct, used for
+ * getting the minimal internal energy allowed in by SWIFT.
+ * Read from yml file into engine struct.
  */
 __attribute__((always_inline)) INLINE static void cooling_cool_part(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
+    const struct hydro_props* hydro_props,
+    const struct entropy_floor_properties* floor_props,
     const struct cooling_function_data* restrict cooling,
-    struct part* restrict p, struct xpart* restrict xp, double dt) {
+    struct part* restrict p, struct xpart* restrict xp, double dt,
+    double dt_therm) {
 
+  /* Nothing to do here? */
   if (dt == 0.) return;
 
-  /* Current du_dt */
-  const float hydro_du_dt = hydro_get_internal_energy_dt(p);
+  /* Current energy */
+  const float u_old = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Current du_dt in physical coordinates (internal units) */
+  const float hydro_du_dt = hydro_get_physical_internal_energy_dt(p, cosmo);
+
+  /* Calculate energy after dt */
+  gr_float u_new =
+      cooling_new_energy(phys_const, us, cosmo, cooling, p, xp, dt);
+
+  float delta_u = u_new - u_old + hydro_du_dt * dt_therm;
+
+  /* We now need to check that we are not going to go below any of the limits */
+
+  /* First, check whether we may end up below the minimal energy after
+   * this step 1/2 kick + another 1/2 kick that could potentially be for
+   * a time-step twice as big. We hence check for 1.5 delta_u. */
+  if (u_old + 1.5 * delta_u < hydro_props->minimal_internal_energy) {
+    delta_u = (hydro_props->minimal_internal_energy - u_old) / 1.5;
+  }
+
+  /* Second, check whether the energy used in the prediction could get negative.
+   * We need to check for the 1/2 dt kick followed by a full time-step drift
+   * that could potentially be for a time-step twice as big. We hence check
+   * for 2.5 delta_u but this time against 0 energy not the minimum.
+   * To avoid numerical rounding bringing us below 0., we add a tiny tolerance.
+   */
+  const float rounding_tolerance = 1.0e-4;
 
-  /* compute cooling rate */
-  const float du_dt = cooling_rate(phys_const, us, cosmo, cooling, p, xp, dt);
+  if (u_old + 2.5 * delta_u < 0.) {
+    delta_u = -u_old / (2.5 + rounding_tolerance);
+  }
+
+  /* Turn this into a rate of change (including cosmology term) */
+  const float cooling_du_dt = delta_u / dt_therm;
 
-  /* record energy lost */
-  xp->cooling_data.radiated_energy += -du_dt * dt * hydro_get_mass(p);
+  /* Update the internal energy time derivative */
+  hydro_set_physical_internal_energy_dt(p, cosmo, cooling_du_dt);
 
-  /* Update the internal energy */
-  hydro_set_internal_energy_dt(p, hydro_du_dt + du_dt);
+  /* Store the radiated energy */
+  xp->cooling_data.radiated_energy -= hydro_get_mass(p) * cooling_du_dt * dt;
+}
+
+static INLINE float cooling_get_temperature(
+    const struct phys_const* restrict phys_const,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, const struct xpart* restrict xp) {
+
+  error("This function needs implementing!!");
+  return 0.;
 }
 
 /**
@@ -674,7 +722,9 @@ __attribute__((always_inline)) INLINE static float cooling_timestep(
     const struct cooling_function_data* restrict cooling,
     const struct phys_const* restrict phys_const,
     const struct cosmology* restrict cosmo,
-    const struct unit_system* restrict us, const struct part* restrict p) {
+    const struct unit_system* restrict us,
+    const struct hydro_props* hydro_props, const struct part* restrict p,
+    const struct xpart* restrict xp) {
 
   return FLT_MAX;
 }
@@ -691,7 +741,7 @@ __attribute__((always_inline)) INLINE static void cooling_init_units(
   /* These are conversions from code units to cgs. */
 
   /* first cosmo */
-  cooling->units.a_units = 1.0;  // units for the expansion factor (1/1+zi)
+  cooling->units.a_units = 1.0;  // units for the expansion factor
   cooling->units.a_value = 1.0;
 
   /* We assume here all physical quantities to
@@ -700,12 +750,12 @@ __attribute__((always_inline)) INLINE static void cooling_init_units(
 
   /* then units */
   cooling->units.density_units =
-      us->UnitMass_in_cgs / pow(us->UnitLength_in_cgs, 3);
-  cooling->units.length_units = us->UnitLength_in_cgs;
-  cooling->units.time_units = us->UnitTime_in_cgs;
-  cooling->units.velocity_units = cooling->units.a_units *
-                                  cooling->units.length_units /
-                                  cooling->units.time_units;
+      units_cgs_conversion_factor(us, UNIT_CONV_DENSITY);
+  cooling->units.length_units =
+      units_cgs_conversion_factor(us, UNIT_CONV_LENGTH);
+  cooling->units.time_units = units_cgs_conversion_factor(us, UNIT_CONV_TIME);
+  cooling->units.velocity_units =
+      units_cgs_conversion_factor(us, UNIT_CONV_VELOCITY);
 }
 
 /**
@@ -784,7 +834,49 @@ __attribute__((always_inline)) INLINE static void cooling_init_backend(
   /* Set up the units system. */
   cooling_init_units(us, cooling);
 
+  /* Set up grackle */
   cooling_init_grackle(cooling);
 }
 
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * @param cooling the cooling data structure.
+ */
+static INLINE void cooling_clean(struct cooling_function_data* cooling) {
+
+  // MATTHIEU: To do: free stuff here
+}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * Nothing to do beyond writing the structure to the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+static INLINE void cooling_struct_dump(
+    const struct cooling_function_data* cooling, FILE* stream) {
+  restart_write_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                       stream, "cooling", "cooling function");
+}
+
+/**
+ * @brief Restore a cooling struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Nothing to do beyond reading the structure from the stream.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+static INLINE void cooling_struct_restore(struct cooling_function_data* cooling,
+                                          FILE* stream,
+                                          const struct cosmology* cosmo) {
+  restart_read_blocks((void*)cooling, sizeof(struct cooling_function_data), 1,
+                      stream, NULL, "cooling function");
+}
+
 #endif /* SWIFT_COOLING_GRACKLE_H */
diff --git a/src/cooling/grackle/cooling_io.h b/src/cooling/grackle/cooling_io.h
index faf84cf97d8449d54f2727ec26b16a9d81d117c6..3905cafd05fb8e15ddf33f4ea688d6144698df73 100644
--- a/src/cooling/grackle/cooling_io.h
+++ b/src/cooling/grackle/cooling_io.h
@@ -19,9 +19,6 @@
 #ifndef SWIFT_COOLING_GRACKLE_IO_H
 #define SWIFT_COOLING_GRACKLE_IO_H
 
-/* Config parameters. */
-#include "../config.h"
-
 /* Local includes */
 #include "cooling_struct.h"
 #include "io_properties.h"
@@ -29,20 +26,20 @@
 #ifdef HAVE_HDF5
 
 /**
- * @brief Writes the current model of SPH to the file
- * @param h_grpsph The HDF5 group in which to write
+ * @brief Writes the current model of cooling to the file
+ * @param h_grp The HDF5 group in which to write
  */
 __attribute__((always_inline)) INLINE static void cooling_write_flavour(
-    hid_t h_grpsph) {
+    hid_t h_grp, const struct cooling_function_data* cooling) {
 
 #if COOLING_GRACKLE_MODE == 0
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Grackle");
+  io_write_attribute_s(h_grp, "Cooling Model", "Grackle");
 #elif COOLING_GRACKLE_MODE == 1
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Grackle1");
+  io_write_attribute_s(h_grp, "Cooling Model", "Grackle1");
 #elif COOLING_GRACKLE_MODE == 2
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Grackle2");
+  io_write_attribute_s(h_grp, "Cooling Model", "Grackle2");
 #elif COOLING_GRACKLE_MODE == 3
-  io_write_attribute_s(h_grpsph, "Cooling Model", "Grackle3");
+  io_write_attribute_s(h_grp, "Cooling Model", "Grackle3");
 #else
   error("This function should be called only with one of the Grackle cooling.");
 #endif
@@ -59,13 +56,11 @@ __attribute__((always_inline)) INLINE static void cooling_write_flavour(
  * @return Returns the number of fields to write.
  */
 __attribute__((always_inline)) INLINE static int cooling_write_particles(
-    const struct xpart* xparts, struct io_props* list,
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
     const struct cooling_function_data* cooling) {
 
   int num = 0;
 
-  if (cooling->output_mode == 0) return num;
-
 #if COOLING_GRACKLE_MODE >= 1
   /* List what we want to write */
   list[0] = io_make_output_field("HI", FLOAT, 1, UNIT_CONV_NO_UNITS, xparts,
@@ -89,8 +84,6 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles(
   num += 6;
 #endif
 
-  if (cooling->output_mode == 1) return num;
-
 #if COOLING_GRACKLE_MODE >= 2
   list += num;
 
@@ -106,8 +99,6 @@ __attribute__((always_inline)) INLINE static int cooling_write_particles(
   num += 3;
 #endif
 
-  if (cooling->output_mode == 2) return num;
-
 #if COOLING_GRACKLE_MODE >= 3
   list += num;
 
@@ -156,9 +147,6 @@ __attribute__((always_inline)) INLINE static void cooling_read_parameters(
   cooling->self_shielding_method = parser_get_opt_param_int(
       parameter_file, "GrackleCooling:SelfShieldingMethod", 0);
 
-  cooling->output_mode =
-      parser_get_opt_param_int(parameter_file, "GrackleCooling:OutputMode", 0);
-
   cooling->max_step = parser_get_opt_param_int(
       parameter_file, "GrackleCooling:MaxSteps", 10000);
 
diff --git a/src/cooling/grackle/cooling_struct.h b/src/cooling/grackle/cooling_struct.h
index b714690ce4688268723748b29506e458cccc4be9..66c385234cccf6532100e86f2c16a508b1112baa 100644
--- a/src/cooling/grackle/cooling_struct.h
+++ b/src/cooling/grackle/cooling_struct.h
@@ -16,14 +16,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_COOLING_STRUCT_NONE_H
-#define SWIFT_COOLING_STRUCT_NONE_H
+#ifndef SWIFT_COOLING_STRUCT_GRACKLE_H
+#define SWIFT_COOLING_STRUCT_GRACKLE_H
 
 /* include grackle */
 #include <grackle.h>
 
-#include "../config.h"
-
 /**
  * @file src/cooling/none/cooling_struct.h
  * @brief Empty infrastructure for the cases without cooling function
@@ -61,9 +59,6 @@ struct cooling_function_data {
   /* Self shielding method (<= 3) means grackle modes */
   int self_shielding_method;
 
-  /* Output mode (correspond to primordial chemistry mode */
-  int output_mode;
-
   /* convergence limit for first init */
   float convergence_limit;
 
@@ -113,4 +108,4 @@ struct cooling_xpart_data {
   float metal_frac;
 };
 
-#endif /* SWIFT_COOLING_STRUCT_NONE_H */
+#endif /* SWIFT_COOLING_STRUCT_GRACKLE_H */
diff --git a/src/cooling/none/cooling.h b/src/cooling/none/cooling.h
index 0cc465adcdad8fe19afe4a9867e5d68a22ed9119..3f90d357ad863da0525859f06e85a4cc492d3ae2 100644
--- a/src/cooling/none/cooling.h
+++ b/src/cooling/none/cooling.h
@@ -23,18 +23,33 @@
  * @file src/cooling/none/cooling.h
  * @brief Empty infrastructure for the cases without cooling function
  */
+#include "../config.h"
 
 /* Some standard headers. */
 #include <float.h>
 #include <math.h>
 
 /* Local includes. */
-#include "error.h"
+#include "cooling_struct.h"
+#include "cosmology.h"
+#include "entropy_floor.h"
 #include "hydro.h"
-#include "parser.h"
+#include "hydro_properties.h"
 #include "part.h"
-#include "physical_constants.h"
-#include "units.h"
+
+/**
+ * @brief Common operations performed on the cooling function at a
+ * given time-step or redshift.
+ *
+ * Nothing to do here for the 'none' cooling model.
+ *
+ * @param cosmo The current cosmological model.
+ * @param cooling The #cooling_function_data used in the run.
+ */
+INLINE static void cooling_update(const struct cosmology* cosmo,
+                                  struct cooling_function_data* cooling) {
+  // Add content if required.
+}
 
 /**
  * @brief Apply the cooling function to a particle.
@@ -44,17 +59,22 @@
  * @param phys_const The physical constants in internal units.
  * @param us The internal system of units.
  * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
  * @param cooling The #cooling_function_data used in the run.
  * @param p Pointer to the particle data.
  * @param xp Pointer to the extended particle data.
  * @param dt The time-step of this particle.
+ * @param dt_therm The time-step operator used for thermal quantities.
  */
 __attribute__((always_inline)) INLINE static void cooling_cool_part(
     const struct phys_const* restrict phys_const,
     const struct unit_system* restrict us,
     const struct cosmology* restrict cosmo,
+    const struct hydro_props* hydro_props,
+    const struct entropy_floor_properties* floor_props,
     const struct cooling_function_data* restrict cooling,
-    struct part* restrict p, struct xpart* restrict xp, float dt) {}
+    struct part* restrict p, struct xpart* restrict xp, const float dt,
+    const float dt_therm) {}
 
 /**
  * @brief Computes the cooling time-step.
@@ -64,14 +84,18 @@ __attribute__((always_inline)) INLINE static void cooling_cool_part(
  * @param cooling The #cooling_function_data used in the run.
  * @param phys_const The physical constants in internal units.
  * @param cosmo The current cosmological model.
+ * @param hydro_props The properties of the hydro scheme.
  * @param us The internal system of units.
  * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended data of the particle.
  */
 __attribute__((always_inline)) INLINE static float cooling_timestep(
     const struct cooling_function_data* restrict cooling,
     const struct phys_const* restrict phys_const,
     const struct cosmology* restrict cosmo,
-    const struct unit_system* restrict us, const struct part* restrict p) {
+    const struct unit_system* restrict us,
+    const struct hydro_props* hydro_props, const struct part* restrict p,
+    const struct xpart* restrict xp) {
 
   return FLT_MAX;
 }
@@ -96,6 +120,49 @@ __attribute__((always_inline)) INLINE static void cooling_first_init_part(
     const struct cooling_function_data* data, const struct part* restrict p,
     struct xpart* restrict xp) {}
 
+/**
+ * @brief Compute the temperature of a #part based on the cooling function.
+ *
+ * @param phys_const #phys_const data structure.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cosmo #cosmology data structure.
+ * @param cooling #cooling_function_data struct.
+ * @param p #part data.
+ * @param xp Pointer to the #xpart data.
+ */
+INLINE static float cooling_get_temperature(
+    const struct phys_const* restrict phys_const,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cosmology* restrict cosmo,
+    const struct cooling_function_data* restrict cooling,
+    const struct part* restrict p, const struct xpart* restrict xp) {
+
+  /* Physical constants */
+  const double m_H = phys_const->const_proton_mass;
+  const double k_B = phys_const->const_boltzmann_k;
+
+  /* Gas properties */
+  const double T_transition = hydro_props->hydrogen_ionization_temperature;
+  const double mu_neutral = hydro_props->mu_neutral;
+  const double mu_ionised = hydro_props->mu_ionised;
+
+  /* Particle temperature */
+  const double u = hydro_get_physical_internal_energy(p, xp, cosmo);
+
+  /* Temperature over mean molecular weight */
+  const double T_over_mu = hydro_gamma_minus_one * u * m_H / k_B;
+
+  /* Are we above or below the HII -> HI transition? */
+  if (T_over_mu > (T_transition + 1.) / mu_ionised)
+    return T_over_mu * mu_ionised;
+  else if (T_over_mu < (T_transition - 1.) / mu_neutral)
+    return T_over_mu * mu_neutral;
+  else
+    return T_transition;
+}
+
 /**
  * @brief Returns the total radiated energy by this particle.
  *
@@ -136,4 +203,36 @@ static INLINE void cooling_print_backend(
   message("Cooling function is 'No cooling'.");
 }
 
+/**
+ * @brief Clean-up the memory allocated for the cooling routines
+ *
+ * @param cooling the cooling data structure.
+ */
+static INLINE void cooling_clean(struct cooling_function_data* cooling) {}
+
+/**
+ * @brief Write a cooling struct to the given FILE as a stream of bytes.
+ *
+ * Empty structure so nothing to do here.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ */
+static INLINE void cooling_struct_dump(
+    const struct cooling_function_data* cooling, FILE* stream) {}
+
+/**
+ * @brief Restore a cooling struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Empty structure so nothing to do here.
+ *
+ * @param cooling the struct
+ * @param stream the file stream
+ * @param cosmo #cosmology structure
+ */
+static INLINE void cooling_struct_restore(struct cooling_function_data* cooling,
+                                          FILE* stream,
+                                          const struct cosmology* cosmo) {}
+
 #endif /* SWIFT_COOLING_NONE_H */
diff --git a/src/cooling/none/cooling_io.h b/src/cooling/none/cooling_io.h
index e4c84f506bcd31ff95ededb5be889fbf9a27261b..16b4b4ca29f8ebd325decc25420d7db617e1e4ef 100644
--- a/src/cooling/none/cooling_io.h
+++ b/src/cooling/none/cooling_io.h
@@ -23,24 +23,35 @@
 #include "../config.h"
 
 /* Local includes */
+#include "cooling.h"
 #include "io_properties.h"
 
 #ifdef HAVE_HDF5
 
 /**
  * @brief Writes the current model of SPH to the file
- * @param h_grpsph The HDF5 group in which to write
+ * @param h_grp The HDF5 group in which to write
+ * @param cooling the parameters of the cooling function.
  */
 __attribute__((always_inline)) INLINE static void cooling_write_flavour(
-    hid_t h_grpsph) {
+    hid_t h_grp, const struct cooling_function_data* cooling) {
 
-  io_write_attribute_s(h_grpsph, "Cooling Model", "None");
+  io_write_attribute_s(h_grp, "Cooling Model", "None");
 }
 #endif
 
+INLINE static void convert_part_T(const struct engine* e, const struct part* p,
+                                  const struct xpart* xp, float* ret) {
+
+  ret[0] = cooling_get_temperature(e->physical_constants, e->hydro_properties,
+                                   e->internal_units, e->cosmology,
+                                   e->cooling_func, p, xp);
+}
+
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
+ * @param parts The particle array.
  * @param xparts The extended particle array.
  * @param list The list of i/o properties to write.
  * @param cooling The #cooling_function_data
@@ -48,9 +59,13 @@ __attribute__((always_inline)) INLINE static void cooling_write_flavour(
  * @return Returns the number of fields to write.
  */
 __attribute__((always_inline)) INLINE static int cooling_write_particles(
-    const struct xpart* xparts, struct io_props* list,
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
     const struct cooling_function_data* cooling) {
-  return 0;
+
+  list[0] = io_make_output_field_convert_part("Temperature", FLOAT, 1,
+                                              UNIT_CONV_TEMPERATURE, parts,
+                                              xparts, convert_part_T);
+  return 1;
 }
 
 #endif /* SWIFT_COOLING_NONE_IO_H */
diff --git a/src/cooling_io.h b/src/cooling_io.h
index 88eeae2cabdaa8a0909977b84a7dbcf03145d988..1ced353d7ff8320a48731545300274c654a20744 100644
--- a/src/cooling_io.h
+++ b/src/cooling_io.h
@@ -29,6 +29,8 @@
 #include "./cooling/const_du/cooling_io.h"
 #elif defined(COOLING_CONST_LAMBDA)
 #include "./cooling/const_lambda/cooling_io.h"
+#elif defined(COOLING_COMPTON)
+#include "./cooling/Compton/cooling_io.h"
 #elif defined(COOLING_GRACKLE)
 #include "./cooling/grackle/cooling_io.h"
 #elif defined(COOLING_EAGLE)
diff --git a/src/cooling_struct.h b/src/cooling_struct.h
index 9c187d596e714fddaf60ae61323624569196ba70..93de8d1b7a0bcfd56d2b1a503aea1e8339bc8016 100644
--- a/src/cooling_struct.h
+++ b/src/cooling_struct.h
@@ -34,6 +34,8 @@
 #include "./cooling/const_du/cooling_struct.h"
 #elif defined(COOLING_CONST_LAMBDA)
 #include "./cooling/const_lambda/cooling_struct.h"
+#elif defined(COOLING_COMPTON)
+#include "./cooling/Compton/cooling_struct.h"
 #elif defined(COOLING_GRACKLE)
 #include "./cooling/grackle/cooling_struct.h"
 #elif defined(COOLING_EAGLE)
diff --git a/src/cosmology.c b/src/cosmology.c
index ff1116bd65837f6718903f7924c6076b5ff40c26..be23343d0d62584cd3a811e547b327120db744ef 100644
--- a/src/cosmology.c
+++ b/src/cosmology.c
@@ -157,6 +157,8 @@ void cosmology_update(struct cosmology *c, const struct phys_const *phys_const,
       pow(a, -3. * hydro_gamma + 2.); /* 1 / a^(3*gamma - 2) */
   c->a_factor_mu =
       pow(a, 0.5 * (3. * hydro_gamma - 5.)); /* a^{(3*gamma - 5) / 2} */
+  c->a_factor_Balsara_eps =
+      pow(a, 0.5 * (1. - 3. * hydro_gamma)); /* a^{(1 - 3*gamma) / 2} */
 
   /* Redshift */
   c->z = a_inv - 1.;
@@ -487,6 +489,11 @@ void cosmology_init(struct swift_params *params, const struct unit_system *us,
   c->time_base = (c->log_a_end - c->log_a_begin) / max_nr_timesteps;
   c->time_base_inv = 1. / c->time_base;
 
+  /* If a_begin == a_end we hang */
+
+  if (c->a_begin >= c->a_end)
+    error("a_begin must be strictly before (and not equal to) a_end");
+
   /* Construct derived quantities */
 
   /* Curvature density (for closure) */
@@ -502,6 +509,10 @@ void cosmology_init(struct swift_params *params, const struct unit_system *us,
   c->H0 = H0_cgs * units_cgs_conversion_factor(us, UNIT_CONV_TIME);
   c->Hubble_time = 1. / c->H0;
 
+  /* Critical density at present day */
+  c->critical_density_0 =
+      3. * c->H0 * c->H0 / (8. * M_PI * phys_const->const_newton_G);
+
   /* Initialise the interpolation tables */
   c->drift_fac_interp_table = NULL;
   c->grav_kick_fac_interp_table = NULL;
@@ -535,7 +546,7 @@ void cosmology_init_no_cosmo(struct cosmology *c) {
   c->w_0 = 0.;
   c->w_a = 0.;
   c->h = 1.;
-  c->w = 0.;
+  c->w = -1.;
 
   c->a_begin = 1.;
   c->a_end = 1.;
@@ -543,7 +554,9 @@ void cosmology_init_no_cosmo(struct cosmology *c) {
   c->log_a_end = 0.;
 
   c->H = 0.;
+  c->H0 = 0.;
   c->a = 1.;
+  c->z = 0.;
   c->a_inv = 1.;
   c->a2_inv = 1.;
   c->a3_inv = 1.;
@@ -551,16 +564,20 @@ void cosmology_init_no_cosmo(struct cosmology *c) {
   c->a_factor_pressure = 1.;
   c->a_factor_sound_speed = 1.;
   c->a_factor_mu = 1.;
+  c->a_factor_Balsara_eps = 1.;
   c->a_factor_hydro_accel = 1.;
   c->a_factor_grav_accel = 1.;
 
   c->critical_density = 0.;
+  c->critical_density_0 = 0.;
 
   c->time_step_factor = 1.;
 
   c->a_dot = 0.;
   c->time = 0.;
   c->universe_age_at_present_day = 0.;
+  c->Hubble_time = 0.;
+  c->lookback_time = 0.;
 
   /* Initialise the interpolation tables */
   c->drift_fac_interp_table = NULL;
diff --git a/src/cosmology.h b/src/cosmology.h
index 4556b039bd0e306dab37a05bc200c3aa2ab8a602..d6b7dfbdc854a66f89c5511a5076c4fb4a7a5d3f 100644
--- a/src/cosmology.h
+++ b/src/cosmology.h
@@ -54,10 +54,12 @@ struct cosmology {
   /*! Power of the scale-factor used for sound-speed conversion to physical */
   double a_factor_sound_speed;
 
-  /*! Power of the scale-factor used for relative velocities in viscosity term
-   */
+  /*! Power of the scale-factor used for relative velocities in visc. terms */
   double a_factor_mu;
 
+  /*! Power of the scale-factor used for epsilon term in the Balsara switch */
+  double a_factor_Balsara_eps;
+
   /*! Power of the scale-factor used for gravity accelerations */
   double a_factor_grav_accel;
 
@@ -73,6 +75,9 @@ struct cosmology {
   /*! The critical density at the current redshift (in internal units) */
   double critical_density;
 
+  /*! The critical density at redshift 0 (in internal units) */
+  double critical_density_0;
+
   /*! Conversion factor from internal time-step size to cosmological step */
   double time_step_factor;
 
diff --git a/src/cycle.h b/src/cycle.h
index 842510e066e2f6f94e736851bf636c9a73e4f25f..0ba1277893fe7be7d8b62cdcb9a8873d5709eb60 100644
--- a/src/cycle.h
+++ b/src/cycle.h
@@ -80,6 +80,7 @@ intrinsic.])], [rtc_ok=no])
 
 /***************************************************************************/
 
+#include <stdint.h>
 #if TIME_WITH_SYS_TIME
 #include <sys/time.h>
 #include <time.h>
@@ -531,7 +532,7 @@ INLINE_ELAPSED(inline)
 #endif
 
 #if defined(__aarch64__) && defined(HAVE_ARMV8_CNTVCT_EL0) && \
-    !defined(HAVE_ARMV8_PMCCNTR_EL0)
+    !defined(HAVE_TICK_COUNTER)
 typedef uint64_t ticks;
 static inline ticks getticks(void) {
   uint64_t Rt;
@@ -542,7 +543,8 @@ INLINE_ELAPSED(inline)
 #define HAVE_TICK_COUNTER
 #endif
 
-#if defined(__aarch64__) && defined(HAVE_ARMV8_PMCCNTR_EL0)
+#if defined(__aarch64__) && defined(HAVE_ARMV8_PMCCNTR_EL0) && \
+    !defined(HAVE_TICK_COUNTER)
 typedef uint64_t ticks;
 static inline ticks getticks(void) {
   uint64_t cc = 0;
diff --git a/src/debug.c b/src/debug.c
index a680b3fd837e8ac0b30419ae952b2335675356ef..6257f7cf4e62c3db9027c820bb658eb678c0ecf1 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -48,6 +48,8 @@
 #include "./hydro/PressureEntropy/hydro_debug.h"
 #elif defined(HOPKINS_PU_SPH)
 #include "./hydro/PressureEnergy/hydro_debug.h"
+#elif defined(HOPKINS_PU_SPH_MONAGHAN)
+#include "./hydro/PressureEnergyMorrisMonaghanAV/hydro_debug.h"
 #elif defined(DEFAULT_SPH)
 #include "./hydro/Default/hydro_debug.h"
 #elif defined(GIZMO_MFV_SPH)
@@ -58,6 +60,8 @@
 #include "./hydro/Shadowswift/hydro_debug.h"
 #elif defined(PLANETARY_SPH)
 #include "./hydro/Planetary/hydro_debug.h"
+#elif defined(ANARCHY_PU_SPH)
+#include "./hydro/AnarchyPU/hydro_debug.h"
 #else
 #error "Invalid choice of SPH variant"
 #endif
@@ -172,8 +176,16 @@ int checkSpacehmax(struct space *s) {
   float cell_h_max = 0.0f;
   for (int k = 0; k < s->nr_cells; k++) {
     if (s->cells_top[k].nodeID == s->e->nodeID &&
-        s->cells_top[k].h_max > cell_h_max) {
-      cell_h_max = s->cells_top[k].h_max;
+        s->cells_top[k].hydro.h_max > cell_h_max) {
+      cell_h_max = s->cells_top[k].hydro.h_max;
+    }
+  }
+
+  float cell_stars_h_max = 0.0f;
+  for (int k = 0; k < s->nr_cells; k++) {
+    if (s->cells_top[k].nodeID == s->e->nodeID &&
+        s->cells_top[k].stars.h_max > cell_stars_h_max) {
+      cell_stars_h_max = s->cells_top[k].stars.h_max;
     }
   }
 
@@ -185,15 +197,26 @@ int checkSpacehmax(struct space *s) {
     }
   }
 
+  /* Now all the sparticles. */
+  float spart_h_max = 0.0f;
+  for (size_t k = 0; k < s->nr_sparts; k++) {
+    if (s->sparts[k].h > spart_h_max) {
+      spart_h_max = s->sparts[k].h;
+    }
+  }
+
   /*  If within some epsilon we are OK. */
-  if (fabsf(cell_h_max - part_h_max) <= FLT_EPSILON) return 1;
+  if (fabsf(cell_h_max - part_h_max) <= FLT_EPSILON &&
+      fabsf(cell_stars_h_max - spart_h_max) <= FLT_EPSILON)
+    return 1;
 
   /* There is a problem. Hunt it down. */
+  /* part */
   for (int k = 0; k < s->nr_cells; k++) {
     if (s->cells_top[k].nodeID == s->e->nodeID) {
-      if (s->cells_top[k].h_max > part_h_max) {
-        message("cell %d is inconsistent (%f > %f)", k, s->cells_top[k].h_max,
-                part_h_max);
+      if (s->cells_top[k].hydro.h_max > part_h_max) {
+        message("cell %d is inconsistent (%f > %f)", k,
+                s->cells_top[k].hydro.h_max, part_h_max);
       }
     }
   }
@@ -205,6 +228,23 @@ int checkSpacehmax(struct space *s) {
     }
   }
 
+  /* spart */
+  for (int k = 0; k < s->nr_cells; k++) {
+    if (s->cells_top[k].nodeID == s->e->nodeID) {
+      if (s->cells_top[k].stars.h_max > spart_h_max) {
+        message("cell %d is inconsistent (%f > %f)", k,
+                s->cells_top[k].stars.h_max, spart_h_max);
+      }
+    }
+  }
+
+  for (size_t k = 0; k < s->nr_sparts; k++) {
+    if (s->sparts[k].h > cell_stars_h_max) {
+      message("spart %lld is inconsistent (%f > %f)", s->sparts[k].id,
+              s->sparts[k].h, cell_stars_h_max);
+    }
+  }
+
   return 0;
 }
 
@@ -223,15 +263,17 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
 
   float h_max = 0.0f;
   float dx_max = 0.0f;
+  float stars_h_max = 0.0f;
+  float stars_dx_max = 0.0f;
   int result = 1;
 
   const double loc_min[3] = {c->loc[0], c->loc[1], c->loc[2]};
   const double loc_max[3] = {c->loc[0] + c->width[0], c->loc[1] + c->width[1],
                              c->loc[2] + c->width[2]};
 
-  const size_t nr_parts = c->count;
-  struct part *parts = c->parts;
-  struct xpart *xparts = c->xparts;
+  const size_t nr_parts = c->hydro.count;
+  struct part *parts = c->hydro.parts;
+  struct xpart *xparts = c->hydro.xparts;
   for (size_t k = 0; k < nr_parts; k++) {
 
     struct part *const p = &parts[k];
@@ -258,6 +300,33 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
     dx_max = max(dx_max, sqrt(dx2));
   }
 
+  const size_t nr_sparts = c->stars.count;
+  struct spart *sparts = c->stars.parts;
+  for (size_t k = 0; k < nr_sparts; k++) {
+
+    struct spart *const sp = &sparts[k];
+
+    if (sp->x[0] < loc_min[0] || sp->x[0] >= loc_max[0] ||
+        sp->x[1] < loc_min[1] || sp->x[1] >= loc_max[1] ||
+        sp->x[2] < loc_min[2] || sp->x[2] >= loc_max[2]) {
+
+      message(
+          "Inconsistent spart position sp->x=[%e %e %e], c->loc=[%e %e %e] "
+          "c->width=[%e %e %e]",
+          sp->x[0], sp->x[1], sp->x[2], c->loc[0], c->loc[1], c->loc[2],
+          c->width[0], c->width[1], c->width[2]);
+
+      result = 0;
+    }
+
+    const float dx2 = sp->x_diff[0] * sp->x_diff[0] +
+                      sp->x_diff[1] * sp->x_diff[1] +
+                      sp->x_diff[2] * sp->x_diff[2];
+
+    stars_h_max = max(stars_h_max, sp->h);
+    stars_dx_max = max(stars_dx_max, sqrt(dx2));
+  }
+
   if (c->split) {
     for (int k = 0; k < 8; k++) {
       if (c->progeny[k] != NULL) {
@@ -268,14 +337,28 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
   }
 
   /* Check. */
-  if (c->h_max != h_max) {
-    message("%d Inconsistent h_max: cell %f != parts %f", *depth, c->h_max,
-            h_max);
+  if (c->hydro.h_max != h_max) {
+    message("%d Inconsistent h_max: cell %f != parts %f", *depth,
+            c->hydro.h_max, h_max);
+    message("location: %f %f %f", c->loc[0], c->loc[1], c->loc[2]);
+    result = 0;
+  }
+  if (c->hydro.dx_max_part != dx_max) {
+    message("%d Inconsistent dx_max: %f != %f", *depth, c->hydro.dx_max_part,
+            dx_max);
+    message("location: %f %f %f", c->loc[0], c->loc[1], c->loc[2]);
+    result = 0;
+  }
+
+  if (c->stars.h_max != stars_h_max) {
+    message("%d Inconsistent stars_h_max: cell %f != parts %f", *depth,
+            c->stars.h_max, stars_h_max);
     message("location: %f %f %f", c->loc[0], c->loc[1], c->loc[2]);
     result = 0;
   }
-  if (c->dx_max_part != dx_max) {
-    message("%d Inconsistent dx_max: %f != %f", *depth, c->dx_max_part, dx_max);
+  if (c->stars.dx_max_part != stars_dx_max) {
+    message("%d Inconsistent stars_dx_max: %f != %f", *depth,
+            c->stars.dx_max_part, stars_dx_max);
     message("location: %f %f %f", c->loc[0], c->loc[1], c->loc[2]);
     result = 0;
   }
@@ -316,13 +399,13 @@ static void dumpCells_map(struct cell *c, void *data) {
 #endif
 
   /* Only cells with particles are dumped. */
-  if (c->count > 0 || c->gcount > 0 || c->scount > 0) {
+  if (c->hydro.count > 0 || c->grav.count > 0 || c->stars.count > 0) {
 
     /* In MPI mode we may only output cells with foreign partners.
      * These define the edges of the partitions. */
     int ismpiactive = 0;
 #if WITH_MPI
-    ismpiactive = (c->send_xv != NULL);
+    ismpiactive = (c->mpi.hydro.send_xv != NULL);
     if (mpiactive)
       mpiactive = ismpiactive;
     else
@@ -337,7 +420,8 @@ static void dumpCells_map(struct cell *c, void *data) {
     else
       active = 1;
 
-    /* So output local super cells that are active and have MPI
+    /* So output local super cells or top-level cells that are active and have
+     * MPI
      * tasks as requested. */
     if (c->nodeID == e->nodeID &&
         (!super || ((super && c->super == c) || (c->parent == NULL))) &&
@@ -346,14 +430,14 @@ static void dumpCells_map(struct cell *c, void *data) {
       /* If requested we work out how many particles are active in this cell. */
       int pactcount = 0;
       if (pactive) {
-        const struct part *parts = c->parts;
-        for (int k = 0; k < c->count; k++)
+        const struct part *parts = c->hydro.parts;
+        for (int k = 0; k < c->hydro.count; k++)
           if (part_is_active(&parts[k], e)) pactcount++;
-        struct gpart *gparts = c->gparts;
-        for (int k = 0; k < c->gcount; k++)
+        struct gpart *gparts = c->grav.parts;
+        for (int k = 0; k < c->grav.count; k++)
           if (gpart_is_active(&gparts[k], e)) pactcount++;
-        struct spart *sparts = c->sparts;
-        for (int k = 0; k < c->scount; k++)
+        struct spart *sparts = c->stars.parts;
+        for (int k = 0; k < c->stars.count; k++)
           if (spart_is_active(&sparts[k], e)) pactcount++;
       }
 
@@ -361,9 +445,9 @@ static void dumpCells_map(struct cell *c, void *data) {
               "  %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %6d %6d %6d %6d %6d %6d "
               "%6.1f %20lld %6d %6d %6d %6d %6d %6d %6d\n",
               c->loc[0], c->loc[1], c->loc[2], c->width[0], c->width[1],
-              c->width[2], e->step, c->count, c->gcount, c->scount, pactcount,
-              c->depth, ntasks, c->ti_hydro_end_min,
-              get_time_bin(c->ti_hydro_end_min), (c->super == c),
+              c->width[2], e->step, c->hydro.count, c->grav.count,
+              c->stars.count, pactcount, c->depth, ntasks, c->hydro.ti_end_min,
+              get_time_bin(c->hydro.ti_end_min), (c->super == c),
               (c->parent == NULL), cell_is_active_hydro(c, e), c->nodeID,
               c->nodeID == e->nodeID, ismpiactive);
     }
@@ -414,13 +498,13 @@ void dumpCells(const char *prefix, int super, int active, int mpiactive,
   fclose(file);
 }
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 
 /**
- * @brief Dump the METIS graph in standard format, simple format and weights
+ * @brief Dump a graph in METIS standard format, simple format and weights
  * only, to a file.
  *
- * The standard format output can be read into the METIS
+ * The standard format output can be read into the METIS and some ParMETIS
  * command-line tools. The simple format is just the cell connectivity (this
  * should not change between calls).  The weights format is the standard one,
  * minus the cell connectivity.
@@ -552,7 +636,7 @@ void dumpMETISGraph(const char *prefix, idx_t nvertices, idx_t nvertexweights,
   }
 }
 
-#endif /* HAVE_METIS */
+#endif /* HAVE_METIS || HAVE_PARMETIS */
 
 #ifdef HAVE_MPI
 /**
diff --git a/src/debug.h b/src/debug.h
index 061da6c50f35a74fd307c868f66ff0cc60256f61..3cafd17b835a1a816e049f3a714bedcaf34d183a 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -39,7 +39,7 @@ int checkCellhdxmax(const struct cell *c, int *depth);
 void dumpCells(const char *prefix, int super, int active, int mpiactive,
                int pactive, struct space *s, int rank, int step);
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 #include "metis.h"
 void dumpMETISGraph(const char *prefix, idx_t nvtxs, idx_t ncon, idx_t *xadj,
                     idx_t *adjncy, idx_t *vwgt, idx_t *vsize, idx_t *adjwgt);
diff --git a/src/dimension.h b/src/dimension.h
index 0b2093d718a61c6ce850db1970412af3e2e462b9..2ae3a6d8ee9cac0b8bd3241d5e7b45f8f62dc92a 100644
--- a/src/dimension.h
+++ b/src/dimension.h
@@ -89,6 +89,34 @@ __attribute__((always_inline)) INLINE static float pow_dimension(float x) {
 #endif
 }
 
+/**
+ * @brief Returns the argument to the power given by the inverse of the
+ * dimension
+ *
+ * Computes \f$x^{1/d}\f$.
+ */
+__attribute__((always_inline)) INLINE static float pow_inv_dimension(float x) {
+
+#if defined(HYDRO_DIMENSION_3D)
+
+  return cbrtf(x);
+
+#elif defined(HYDRO_DIMENSION_2D)
+
+  return sqrtf(x);
+
+#elif defined(HYDRO_DIMENSION_1D)
+
+  return x;
+
+#else
+
+  error("The dimension is not defined !");
+  return 0.f;
+
+#endif
+}
+
 /**
  * @brief Returns the argument to the power given by the dimension plus one
  *
@@ -288,7 +316,7 @@ __attribute__((always_inline)) INLINE static vector pow_dimension_vec(
 #else
 
   error("The dimension is not defined !");
-  return vec_set(0.f);
+  return vec_set1(0.f);
 
 #endif
 }
@@ -318,7 +346,7 @@ __attribute__((always_inline)) INLINE static vector pow_dimension_plus_one_vec(
 #else
 
   error("The dimension is not defined !");
-  return vec_set(0.f);
+  return vec_set1(0.f);
 
 #endif
 }
diff --git a/src/drift.h b/src/drift.h
index ff0fea744012b7143afed2a05b286d4646cdd69a..7e874fe0ceabe5b091cc7c5bb53adbef2c9a3efd 100644
--- a/src/drift.h
+++ b/src/drift.h
@@ -28,6 +28,7 @@
 #include "dimension.h"
 #include "hydro.h"
 #include "part.h"
+#include "stars.h"
 
 /**
  * @brief Perform the 'drift' operation on a #gpart.
@@ -137,6 +138,16 @@ __attribute__((always_inline)) INLINE static void drift_spart(
   sp->x[0] += sp->v[0] * dt_drift;
   sp->x[1] += sp->v[1] * dt_drift;
   sp->x[2] += sp->v[2] * dt_drift;
+
+  /* Predict the values of the extra fields */
+  stars_predict_extra(sp, dt_drift);
+
+  /* Compute offsets since last cell construction */
+  for (int k = 0; k < 3; k++) {
+    const float dx = sp->v[k] * dt_drift;
+    sp->x_diff[k] -= dx;
+    sp->x_diff_sort[k] -= dx;
+  }
 }
 
 #endif /* SWIFT_DRIFT_H */
diff --git a/src/dump.c b/src/dump.c
index ba50b517a72e71ab0ca5e791319c6336925762cb..9c519c2130b2612309e623b8234e3369214b52e2 100644
--- a/src/dump.c
+++ b/src/dump.c
@@ -48,17 +48,24 @@
  */
 void *dump_get(struct dump *d, size_t count, size_t *offset) {
   size_t local_offset = atomic_add(&d->count, count);
+#ifdef SWIFT_DEBUG_CHECKS
+  if (d->count > d->size) error("Dump file is too small.");
+#endif
   *offset = local_offset + d->file_offset;
   return (char *)d->data + local_offset;
 }
 
 /**
  * @brief Ensure that at least size bytes are available in the #dump.
+ *
+ * @param d The #dump.
+ * @param required_size The required size for the #dump
+ * @param increase_size If not enough size, increase by this amount
  */
-void dump_ensure(struct dump *d, size_t size) {
+void dump_ensure(struct dump *d, size_t required_size, size_t increase_size) {
 
   /* If we have enough space already, just bail. */
-  if (d->size - d->count > size) return;
+  if (d->size - d->count > required_size) return;
 
   /* Unmap the current data. */
   if (munmap(d->data, d->size) != 0) {
@@ -70,7 +77,7 @@ void dump_ensure(struct dump *d, size_t size) {
   const size_t trunc_count = d->count & d->page_mask;
   d->file_offset += trunc_count;
   d->count -= trunc_count;
-  d->size = (size * dump_grow_ensure_factor + ~d->page_mask) & d->page_mask;
+  d->size = (d->count + increase_size + ~d->page_mask) & d->page_mask;
 
   /* Re-allocate the file size. */
   if (posix_fallocate(d->fd, d->file_offset, d->size) != 0) {
@@ -121,7 +128,9 @@ void dump_close(struct dump *d) {
  */
 void dump_init(struct dump *d, const char *filename, size_t size) {
 
-  /* Create the output file. */
+  /* Create the output file.
+     The option O_RDWR seems to be required by mmap.
+  */
   if ((d->fd = open(filename, O_CREAT | O_RDWR, 0660)) == -1) {
     error("Failed to create dump file '%s' (%s).", filename, strerror(errno));
   }
diff --git a/src/dump.h b/src/dump.h
index 6857aa3a008a27e0e8ed23854d84f848ee0ca2be..021bc1e1dc22c178a893e42384c91fafdcf63112 100644
--- a/src/dump.h
+++ b/src/dump.h
@@ -27,9 +27,6 @@
 /* Standard headers */
 #include <stdlib.h>
 
-/* Some constants. */
-#define dump_grow_ensure_factor 10
-
 /** The dump struct. */
 struct dump {
 
@@ -54,7 +51,7 @@ struct dump {
 
 /* Function prototypes. */
 void dump_init(struct dump *d, const char *filename, size_t size);
-void dump_ensure(struct dump *d, size_t size);
+void dump_ensure(struct dump *d, size_t required_size, size_t increase_size);
 void dump_sync(struct dump *d);
 void dump_close(struct dump *d);
 void *dump_get(struct dump *d, size_t count, size_t *offset);
diff --git a/src/engine.c b/src/engine.c
index b56d7f4c4c3652a9f80fc463747f4fb1f4e2fbc2..1560bc068ec24d4572fd7a38016eb37de1696d08 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -62,11 +62,14 @@
 #include "cosmology.h"
 #include "cycle.h"
 #include "debug.h"
+#include "entropy_floor.h"
 #include "equation_of_state.h"
 #include "error.h"
 #include "gravity.h"
 #include "gravity_cache.h"
 #include "hydro.h"
+#include "logger.h"
+#include "logger_io.h"
 #include "map.h"
 #include "memswap.h"
 #include "memuse.h"
@@ -82,7 +85,8 @@
 #include "serial_io.h"
 #include "single_io.h"
 #include "sort_part.h"
-#include "sourceterms.h"
+#include "star_formation.h"
+#include "stars_io.h"
 #include "statistics.h"
 #include "timers.h"
 #include "tools.h"
@@ -107,10 +111,13 @@ const char *engine_policy_names[] = {"none",
                                      "cosmological integration",
                                      "drift everything",
                                      "reconstruct multi-poles",
+                                     "temperature",
                                      "cooling",
-                                     "sourceterms",
                                      "stars",
-                                     "structure finding"};
+                                     "structure finding",
+                                     "star formation",
+                                     "feedback",
+                                     "time-step limiter"};
 
 /** The rank of the engine as a global variable (for messages). */
 int engine_rank;
@@ -123,9 +130,11 @@ int engine_cstep;
  */
 struct end_of_step_data {
 
-  size_t updates, g_updates, s_updates;
+  size_t updated, g_updated, s_updated;
+  size_t inhibited, g_inhibited, s_inhibited;
   integertime_t ti_hydro_end_min, ti_hydro_end_max, ti_hydro_beg_max;
   integertime_t ti_gravity_end_min, ti_gravity_end_max, ti_gravity_beg_max;
+  integertime_t ti_stars_end_min;
   struct engine *e;
 };
 
@@ -143,7 +152,9 @@ void engine_addlink(struct engine *e, struct link **l, struct task *t) {
   /* Get the next free link. */
   const size_t ind = atomic_inc(&e->nr_links);
   if (ind >= e->size_links) {
-    error("Link table overflow.");
+    error(
+        "Link table overflow. Increase the value of "
+        "`Scheduler:links_per_tasks`.");
   }
   struct link *res = &e->links[ind];
 
@@ -152,267 +163,6 @@ void engine_addlink(struct engine *e, struct link **l, struct task *t) {
   res->next = atomic_swap(l, res);
 }
 
-/**
- * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
- */
-void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
-                       struct task *ghost_out) {
-
-  /* If we have reached the leaf OR have to few particles to play with*/
-  if (!c->split || c->count < engine_max_parts_per_ghost) {
-
-    /* Add the ghost task and its dependencies */
-    struct scheduler *s = &e->sched;
-    c->ghost =
-        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
-    scheduler_addunlock(s, ghost_in, c->ghost);
-    scheduler_addunlock(s, c->ghost, ghost_out);
-  } else {
-    /* Keep recursing */
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL)
-        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
-  }
-}
-
-/**
- * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
- * i.e. all the O(Npart) tasks -- timestep version
- *
- * Tasks are only created here. The dependencies will be added later on.
- *
- * Note that there is no need to recurse below the super-cell. Note also
- * that we only add tasks if the relevant particles are present in the cell.
- *
- * @param e The #engine.
- * @param c The #cell.
- */
-void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {
-
-  struct scheduler *s = &e->sched;
-  const int is_with_cooling = (e->policy & engine_policy_cooling);
-
-  /* Are we in a super-cell ? */
-  if (c->super == c) {
-
-    /* Local tasks only... */
-    if (c->nodeID == e->nodeID) {
-
-      /* Add the two half kicks */
-      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
-                                   c, NULL);
-
-      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
-                                   c, NULL);
-
-      /* Add the time-step calculation task and its dependency */
-      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
-                                      0, 0, c, NULL);
-
-      /* Add the task finishing the force calculation */
-      c->end_force = scheduler_addtask(s, task_type_end_force,
-                                       task_subtype_none, 0, 0, c, NULL);
-
-      if (!is_with_cooling) scheduler_addunlock(s, c->end_force, c->kick2);
-      scheduler_addunlock(s, c->kick2, c->timestep);
-      scheduler_addunlock(s, c->timestep, c->kick1);
-    }
-
-  } else { /* We are above the super-cell so need to go deeper */
-
-    /* Recurse. */
-    if (c->split)
-      for (int k = 0; k < 8; k++)
-        if (c->progeny[k] != NULL)
-          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
-  }
-}
-
-/**
- * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
- * i.e. all the O(Npart) tasks -- hydro version
- *
- * Tasks are only created here. The dependencies will be added later on.
- *
- * Note that there is no need to recurse below the super-cell. Note also
- * that we only add tasks if the relevant particles are present in the cell.
- *
- * @param e The #engine.
- * @param c The #cell.
- */
-void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c) {
-
-  struct scheduler *s = &e->sched;
-  const int is_with_cooling = (e->policy & engine_policy_cooling);
-  const int is_with_sourceterms = (e->policy & engine_policy_sourceterms);
-
-  /* Are we in a super-cell ? */
-  if (c->super_hydro == c) {
-
-    /* Add the sort task. */
-    c->sorts =
-        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);
-
-    /* Local tasks only... */
-    if (c->nodeID == e->nodeID) {
-
-      /* Add the drift task. */
-      c->drift_part = scheduler_addtask(s, task_type_drift_part,
-                                        task_subtype_none, 0, 0, c, NULL);
-
-      /* Generate the ghost tasks. */
-      c->ghost_in =
-          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
-                            /* implicit = */ 1, c, NULL);
-      c->ghost_out =
-          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
-                            /* implicit = */ 1, c, NULL);
-      engine_add_ghosts(e, c, c->ghost_in, c->ghost_out);
-
-#ifdef EXTRA_HYDRO_LOOP
-      /* Generate the extra ghost task. */
-      c->extra_ghost = scheduler_addtask(s, task_type_extra_ghost,
-                                         task_subtype_none, 0, 0, c, NULL);
-#endif
-
-      /* Cooling task */
-      if (is_with_cooling) {
-        c->cooling = scheduler_addtask(s, task_type_cooling, task_subtype_none,
-                                       0, 0, c, NULL);
-
-        scheduler_addunlock(s, c->super->end_force, c->cooling);
-        scheduler_addunlock(s, c->cooling, c->super->kick2);
-      }
-
-      /* add source terms */
-      if (is_with_sourceterms) {
-        c->sourceterms = scheduler_addtask(s, task_type_sourceterms,
-                                           task_subtype_none, 0, 0, c, NULL);
-      }
-    }
-
-  } else { /* We are above the super-cell so need to go deeper */
-
-    /* Recurse. */
-    if (c->split)
-      for (int k = 0; k < 8; k++)
-        if (c->progeny[k] != NULL)
-          engine_make_hierarchical_tasks_hydro(e, c->progeny[k]);
-  }
-}
-
-/**
- * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
- * i.e. all the O(Npart) tasks -- gravity version
- *
- * Tasks are only created here. The dependencies will be added later on.
- *
- * Note that there is no need to recurse below the super-cell. Note also
- * that we only add tasks if the relevant particles are present in the cell.
- *
- * @param e The #engine.
- * @param c The #cell.
- */
-void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {
-
-  struct scheduler *s = &e->sched;
-  const int periodic = e->s->periodic;
-  const int is_self_gravity = (e->policy & engine_policy_self_gravity);
-
-  /* Are we in a super-cell ? */
-  if (c->super_gravity == c) {
-
-    /* Local tasks only... */
-    if (c->nodeID == e->nodeID) {
-
-      c->drift_gpart = scheduler_addtask(s, task_type_drift_gpart,
-                                         task_subtype_none, 0, 0, c, NULL);
-
-      if (is_self_gravity) {
-
-        /* Initialisation of the multipoles */
-        c->init_grav = scheduler_addtask(s, task_type_init_grav,
-                                         task_subtype_none, 0, 0, c, NULL);
-
-        /* Gravity non-neighbouring pm calculations */
-        c->grav_long_range = scheduler_addtask(
-            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);
-
-        /* Gravity recursive down-pass */
-        c->grav_down = scheduler_addtask(s, task_type_grav_down,
-                                         task_subtype_none, 0, 0, c, NULL);
-
-        /* Implicit tasks for the up and down passes */
-        c->init_grav_out = scheduler_addtask(s, task_type_init_grav_out,
-                                             task_subtype_none, 0, 1, c, NULL);
-        c->grav_down_in = scheduler_addtask(s, task_type_grav_down_in,
-                                            task_subtype_none, 0, 1, c, NULL);
-
-        /* Gravity mesh force propagation */
-        if (periodic)
-          c->grav_mesh = scheduler_addtask(s, task_type_grav_mesh,
-                                           task_subtype_none, 0, 0, c, NULL);
-
-        if (periodic) scheduler_addunlock(s, c->drift_gpart, c->grav_mesh);
-        if (periodic) scheduler_addunlock(s, c->grav_mesh, c->grav_down);
-        scheduler_addunlock(s, c->init_grav, c->grav_long_range);
-        scheduler_addunlock(s, c->grav_long_range, c->grav_down);
-        scheduler_addunlock(s, c->grav_down, c->super->end_force);
-
-        /* Link in the implicit tasks */
-        scheduler_addunlock(s, c->init_grav, c->init_grav_out);
-        scheduler_addunlock(s, c->grav_down_in, c->grav_down);
-      }
-    }
-  }
-
-  /* We are below the super-cell but not below the maximal splitting depth */
-  else if (c->super_gravity != NULL && c->depth <= space_subdepth_grav) {
-
-    /* Local tasks only... */
-    if (c->nodeID == e->nodeID) {
-
-      if (is_self_gravity) {
-
-        c->init_grav_out = scheduler_addtask(s, task_type_init_grav_out,
-                                             task_subtype_none, 0, 1, c, NULL);
-
-        c->grav_down_in = scheduler_addtask(s, task_type_grav_down_in,
-                                            task_subtype_none, 0, 1, c, NULL);
-
-        scheduler_addunlock(s, c->parent->init_grav_out, c->init_grav_out);
-        scheduler_addunlock(s, c->grav_down_in, c->parent->grav_down_in);
-      }
-    }
-  }
-
-  /* Recurse but not below the maximal splitting depth */
-  if (c->split && c->depth <= space_subdepth_grav)
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL)
-        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
-}
-
-void engine_make_hierarchical_tasks_mapper(void *map_data, int num_elements,
-                                           void *extra_data) {
-  struct engine *e = (struct engine *)extra_data;
-  const int is_with_hydro = (e->policy & engine_policy_hydro);
-  const int is_with_self_gravity = (e->policy & engine_policy_self_gravity);
-  const int is_with_external_gravity =
-      (e->policy & engine_policy_external_gravity);
-
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct cell *c = &((struct cell *)map_data)[ind];
-    /* Make the common tasks (time integration) */
-    engine_make_hierarchical_tasks_common(e, c);
-    /* Add the hydro stuff */
-    if (is_with_hydro) engine_make_hierarchical_tasks_hydro(e, c);
-    /* And the gravity stuff */
-    if (is_with_self_gravity || is_with_external_gravity)
-      engine_make_hierarchical_tasks_gravity(e, c);
-  }
-}
-
 #ifdef WITH_MPI
 /**
  * Do the exchange of one type of particles with all the other nodes.
@@ -655,7 +405,7 @@ struct savelink_mapper_data {
       for (int k = 0; k < counts[nodeID * nr_nodes + node]; k++) {             \
         if (parts[k + offset].gpart != NULL) {                                 \
           if (CHECKS)                                                          \
-            if (parts[k].gpart->id_or_neg_offset > 0)                          \
+            if (parts[k + offset].gpart->id_or_neg_offset > 0)                 \
               error("Trying to link a partnerless " #TYPE "!");                \
           parts[k + offset].gpart->id_or_neg_offset = -count;                  \
           count++;                                                             \
@@ -753,7 +503,7 @@ static void engine_redistribute_relink_mapper(void *map_data, int num_elements,
       }
 
       /* Does this gpart have a star partner ? */
-      else if (s->gparts[k].type == swift_type_star) {
+      else if (s->gparts[k].type == swift_type_stars) {
 
         const ptrdiff_t partner_index =
             offset_sparts - s->gparts[k].id_or_neg_offset;
@@ -793,11 +543,87 @@ void engine_redistribute(struct engine *e) {
   struct space *s = e->s;
   struct cell *cells = s->cells_top;
   const int nr_cells = s->nr_cells;
+  struct xpart *xparts = s->xparts;
   struct part *parts = s->parts;
   struct gpart *gparts = s->gparts;
   struct spart *sparts = s->sparts;
   ticks tic = getticks();
 
+  size_t nr_parts = s->nr_parts;
+  size_t nr_gparts = s->nr_gparts;
+  size_t nr_sparts = s->nr_sparts;
+
+  /* Start by moving inhibited particles to the end of the arrays */
+  for (size_t k = 0; k < nr_parts; /* void */) {
+    if (parts[k].time_bin == time_bin_inhibited) {
+      nr_parts -= 1;
+
+      /* Swap the particle */
+      memswap(&parts[k], &parts[nr_parts], sizeof(struct part));
+
+      /* Swap the xpart */
+      memswap(&xparts[k], &xparts[nr_parts], sizeof(struct xpart));
+
+      /* Swap the link with the gpart */
+      if (parts[k].gpart != NULL) {
+        parts[k].gpart->id_or_neg_offset = -k;
+      }
+      if (parts[nr_parts].gpart != NULL) {
+        parts[nr_parts].gpart->id_or_neg_offset = -nr_parts;
+      }
+    } else {
+      k++;
+    }
+  }
+
+  /* Now move inhibited star particles to the end of the arrays */
+  for (size_t k = 0; k < nr_sparts; /* void */) {
+    if (sparts[k].time_bin == time_bin_inhibited) {
+      nr_sparts -= 1;
+
+      /* Swap the particle */
+      memswap(&s->sparts[k], &s->sparts[nr_sparts], sizeof(struct spart));
+
+      /* Swap the link with the gpart */
+      if (s->sparts[k].gpart != NULL) {
+        s->sparts[k].gpart->id_or_neg_offset = -k;
+      }
+      if (s->sparts[nr_sparts].gpart != NULL) {
+        s->sparts[nr_sparts].gpart->id_or_neg_offset = -nr_sparts;
+      }
+    } else {
+      k++;
+    }
+  }
+
+  /* Finally do the same with the gravity particles */
+  for (size_t k = 0; k < nr_gparts; /* void */) {
+    if (gparts[k].time_bin == time_bin_inhibited) {
+      nr_gparts -= 1;
+
+      /* Swap the particle */
+      memswap(&s->gparts[k], &s->gparts[nr_gparts], sizeof(struct gpart));
+
+      /* Swap the link with part/spart */
+      if (s->gparts[k].type == swift_type_gas) {
+        s->parts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
+      } else if (s->gparts[k].type == swift_type_stars) {
+        s->sparts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
+      }
+      if (s->gparts[nr_gparts].type == swift_type_gas) {
+        s->parts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
+            &s->gparts[nr_gparts];
+      } else if (s->gparts[nr_gparts].type == swift_type_stars) {
+        s->sparts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
+            &s->gparts[nr_gparts];
+      }
+    } else {
+      k++;
+    }
+  }
+
+  /* Now we are ready to deal with real particles and can start the exchange. */
+
   /* Allocate temporary arrays to store the counts of particles to be sent
    * and the destination of each particle */
   int *counts;
@@ -805,7 +631,7 @@ void engine_redistribute(struct engine *e) {
     error("Failed to allocate counts temporary buffer.");
 
   int *dest;
-  if ((dest = (int *)malloc(sizeof(int) * s->nr_parts)) == NULL)
+  if ((dest = (int *)malloc(sizeof(int) * nr_parts)) == NULL)
     error("Failed to allocate dest temporary buffer.");
 
   /* Simple index of node IDs, used for mappers over nodes. */
@@ -825,16 +651,16 @@ void engine_redistribute(struct engine *e) {
   redist_data.base = (void *)parts;
 
   threadpool_map(&e->threadpool, engine_redistribute_dest_mapper_part, parts,
-                 s->nr_parts, sizeof(struct part), 0, &redist_data);
+                 nr_parts, sizeof(struct part), 0, &redist_data);
 
   /* Sort the particles according to their cell index. */
-  if (s->nr_parts > 0)
+  if (nr_parts > 0)
     space_parts_sort(s->parts, s->xparts, dest, &counts[nodeID * nr_nodes],
                      nr_nodes, 0);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the part have been sorted correctly. */
-  for (size_t k = 0; k < s->nr_parts; k++) {
+  for (size_t k = 0; k < nr_parts; k++) {
     const struct part *p = &s->parts[k];
 
     /* New cell index */
@@ -858,7 +684,7 @@ void engine_redistribute(struct engine *e) {
 
   /* We will need to re-link the gpart partners of parts, so save their
    * relative positions in the sent lists. */
-  if (s->nr_parts > 0 && s->nr_gparts > 0) {
+  if (nr_parts > 0 && nr_gparts > 0) {
 
     struct savelink_mapper_data savelink_data;
     savelink_data.nr_nodes = nr_nodes;
@@ -876,7 +702,7 @@ void engine_redistribute(struct engine *e) {
     error("Failed to allocate s_counts temporary buffer.");
 
   int *s_dest;
-  if ((s_dest = (int *)malloc(sizeof(int) * s->nr_sparts)) == NULL)
+  if ((s_dest = (int *)malloc(sizeof(int) * nr_sparts)) == NULL)
     error("Failed to allocate s_dest temporary buffer.");
 
   redist_data.counts = s_counts;
@@ -884,16 +710,16 @@ void engine_redistribute(struct engine *e) {
   redist_data.base = (void *)sparts;
 
   threadpool_map(&e->threadpool, engine_redistribute_dest_mapper_spart, sparts,
-                 s->nr_sparts, sizeof(struct spart), 0, &redist_data);
+                 nr_sparts, sizeof(struct spart), 0, &redist_data);
 
   /* Sort the particles according to their cell index. */
-  if (s->nr_sparts > 0)
+  if (nr_sparts > 0)
     space_sparts_sort(s->sparts, s_dest, &s_counts[nodeID * nr_nodes], nr_nodes,
                       0);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the spart have been sorted correctly. */
-  for (size_t k = 0; k < s->nr_sparts; k++) {
+  for (size_t k = 0; k < nr_sparts; k++) {
     const struct spart *sp = &s->sparts[k];
 
     /* New cell index */
@@ -916,7 +742,7 @@ void engine_redistribute(struct engine *e) {
 #endif
 
   /* We need to re-link the gpart partners of sparts. */
-  if (s->nr_sparts > 0) {
+  if (nr_sparts > 0) {
 
     struct savelink_mapper_data savelink_data;
     savelink_data.nr_nodes = nr_nodes;
@@ -934,7 +760,7 @@ void engine_redistribute(struct engine *e) {
     error("Failed to allocate g_gcount temporary buffer.");
 
   int *g_dest;
-  if ((g_dest = (int *)malloc(sizeof(int) * s->nr_gparts)) == NULL)
+  if ((g_dest = (int *)malloc(sizeof(int) * nr_gparts)) == NULL)
     error("Failed to allocate g_dest temporary buffer.");
 
   redist_data.counts = g_counts;
@@ -942,16 +768,16 @@ void engine_redistribute(struct engine *e) {
   redist_data.base = (void *)gparts;
 
   threadpool_map(&e->threadpool, engine_redistribute_dest_mapper_gpart, gparts,
-                 s->nr_gparts, sizeof(struct gpart), 0, &redist_data);
+                 nr_gparts, sizeof(struct gpart), 0, &redist_data);
 
   /* Sort the gparticles according to their cell index. */
-  if (s->nr_gparts > 0)
+  if (nr_gparts > 0)
     space_gparts_sort(s->gparts, s->parts, s->sparts, g_dest,
                       &g_counts[nodeID * nr_nodes], nr_nodes);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the gpart have been sorted correctly. */
-  for (size_t k = 0; k < s->nr_gparts; k++) {
+  for (size_t k = 0; k < nr_gparts; k++) {
     const struct gpart *gp = &s->gparts[k];
 
     /* New cell index */
@@ -1026,49 +852,50 @@ void engine_redistribute(struct engine *e) {
   /* Now each node knows how many parts, sparts and gparts will be transferred
    * to every other node.
    * Get the new numbers of particles for this node. */
-  size_t nr_parts = 0, nr_gparts = 0, nr_sparts = 0;
-  for (int k = 0; k < nr_nodes; k++) nr_parts += counts[k * nr_nodes + nodeID];
+  size_t nr_parts_new = 0, nr_gparts_new = 0, nr_sparts_new = 0;
+  for (int k = 0; k < nr_nodes; k++)
+    nr_parts_new += counts[k * nr_nodes + nodeID];
   for (int k = 0; k < nr_nodes; k++)
-    nr_gparts += g_counts[k * nr_nodes + nodeID];
+    nr_gparts_new += g_counts[k * nr_nodes + nodeID];
   for (int k = 0; k < nr_nodes; k++)
-    nr_sparts += s_counts[k * nr_nodes + nodeID];
+    nr_sparts_new += s_counts[k * nr_nodes + nodeID];
 
   /* Now exchange the particles, type by type to keep the memory required
    * under control. */
 
   /* SPH particles. */
-  void *new_parts = engine_do_redistribute(counts, (char *)s->parts, nr_parts,
-                                           sizeof(struct part), part_align,
-                                           part_mpi_type, nr_nodes, nodeID);
+  void *new_parts = engine_do_redistribute(
+      counts, (char *)s->parts, nr_parts_new, sizeof(struct part), part_align,
+      part_mpi_type, nr_nodes, nodeID);
   free(s->parts);
   s->parts = (struct part *)new_parts;
-  s->nr_parts = nr_parts;
-  s->size_parts = engine_redistribute_alloc_margin * nr_parts;
+  s->nr_parts = nr_parts_new;
+  s->size_parts = engine_redistribute_alloc_margin * nr_parts_new;
 
   /* Extra SPH particle properties. */
-  new_parts = engine_do_redistribute(counts, (char *)s->xparts, nr_parts,
+  new_parts = engine_do_redistribute(counts, (char *)s->xparts, nr_parts_new,
                                      sizeof(struct xpart), xpart_align,
                                      xpart_mpi_type, nr_nodes, nodeID);
   free(s->xparts);
   s->xparts = (struct xpart *)new_parts;
 
   /* Gravity particles. */
-  new_parts = engine_do_redistribute(g_counts, (char *)s->gparts, nr_gparts,
+  new_parts = engine_do_redistribute(g_counts, (char *)s->gparts, nr_gparts_new,
                                      sizeof(struct gpart), gpart_align,
                                      gpart_mpi_type, nr_nodes, nodeID);
   free(s->gparts);
   s->gparts = (struct gpart *)new_parts;
-  s->nr_gparts = nr_gparts;
-  s->size_gparts = engine_redistribute_alloc_margin * nr_gparts;
+  s->nr_gparts = nr_gparts_new;
+  s->size_gparts = engine_redistribute_alloc_margin * nr_gparts_new;
 
   /* Star particles. */
-  new_parts = engine_do_redistribute(s_counts, (char *)s->sparts, nr_sparts,
+  new_parts = engine_do_redistribute(s_counts, (char *)s->sparts, nr_sparts_new,
                                      sizeof(struct spart), spart_align,
                                      spart_mpi_type, nr_nodes, nodeID);
   free(s->sparts);
   s->sparts = (struct spart *)new_parts;
-  s->nr_sparts = nr_sparts;
-  s->size_sparts = engine_redistribute_alloc_margin * nr_sparts;
+  s->nr_sparts = nr_sparts_new;
+  s->size_sparts = engine_redistribute_alloc_margin * nr_sparts_new;
 
   /* All particles have now arrived. Time for some final operations on the
      stuff we just received */
@@ -1095,7 +922,7 @@ void engine_redistribute(struct engine *e) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that all parts are in the right place. */
-  for (size_t k = 0; k < nr_parts; k++) {
+  for (size_t k = 0; k < nr_parts_new; k++) {
     const int cid = cell_getid(s->cdim, s->parts[k].x[0] * s->iwidth[0],
                                s->parts[k].x[1] * s->iwidth[1],
                                s->parts[k].x[2] * s->iwidth[2]);
@@ -1103,7 +930,7 @@ void engine_redistribute(struct engine *e) {
       error("Received particle (%zu) that does not belong here (nodeID=%i).", k,
             cells[cid].nodeID);
   }
-  for (size_t k = 0; k < nr_gparts; k++) {
+  for (size_t k = 0; k < nr_gparts_new; k++) {
     const int cid = cell_getid(s->cdim, s->gparts[k].x[0] * s->iwidth[0],
                                s->gparts[k].x[1] * s->iwidth[1],
                                s->gparts[k].x[2] * s->iwidth[2]);
@@ -1111,7 +938,7 @@ void engine_redistribute(struct engine *e) {
       error("Received g-particle (%zu) that does not belong here (nodeID=%i).",
             k, cells[cid].nodeID);
   }
-  for (size_t k = 0; k < nr_sparts; k++) {
+  for (size_t k = 0; k < nr_sparts_new; k++) {
     const int cid = cell_getid(s->cdim, s->sparts[k].x[0] * s->iwidth[0],
                                s->sparts[k].x[1] * s->iwidth[1],
                                s->sparts[k].x[2] * s->iwidth[2]);
@@ -1121,8 +948,8 @@ void engine_redistribute(struct engine *e) {
   }
 
   /* Verify that the links are correct */
-  part_verify_links(s->parts, s->gparts, s->sparts, nr_parts, nr_gparts,
-                    nr_sparts, e->verbose);
+  part_verify_links(s->parts, s->gparts, s->sparts, nr_parts_new, nr_gparts_new,
+                    nr_sparts_new, e->verbose);
 #endif
 
   /* Be verbose about what just happened. */
@@ -1131,7 +958,7 @@ void engine_redistribute(struct engine *e) {
     for (int k = 0; k < nr_cells; k++)
       if (cells[k].nodeID == nodeID) my_cells += 1;
     message("node %i now has %zu parts, %zu sparts and %zu gparts in %i cells.",
-            nodeID, nr_parts, nr_sparts, nr_gparts, my_cells);
+            nodeID, nr_parts_new, nr_sparts_new, nr_gparts_new, my_cells);
   }
 
   /* Flag that a redistribute has taken place */
@@ -1152,7 +979,7 @@ void engine_redistribute(struct engine *e) {
  */
 void engine_repartition(struct engine *e) {
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_PARMETIS) || defined(HAVE_METIS))
 
   ticks tic = getticks();
 
@@ -1162,8 +989,7 @@ void engine_repartition(struct engine *e) {
   fflush(stdout);
 
   /* Check that all cells have been drifted to the current time */
-  space_check_drift_point(e->s, e->ti_current,
-                          e->policy & engine_policy_self_gravity);
+  space_check_drift_point(e->s, e->ti_current, /*check_multipoles=*/0);
 #endif
 
   /* Clear the repartition flag. */
@@ -1173,6 +999,12 @@ void engine_repartition(struct engine *e) {
    * bug that doesn't handle this case well. */
   if (e->nr_nodes == 1) return;
 
+  /* Generate the fixed costs include file. */
+  if (e->step > 3 && e->reparttype->trigger <= 1.f) {
+    task_dump_stats("partition_fixed_costs.h", e, /* header = */ 1,
+                    /* allranks = */ 1);
+  }
+
   /* Do the repartitioning. */
   partition_repartition(e->reparttype, e->nodeID, e->nr_nodes, e->s,
                         e->sched.tasks, e->sched.nr_tasks);
@@ -1210,7 +1042,7 @@ void engine_repartition(struct engine *e) {
             clocks_getunit());
 #else
   if (e->reparttype->type != REPART_NONE)
-    error("SWIFT was not compiled with MPI and METIS support.");
+    error("SWIFT was not compiled with MPI and METIS or ParMETIS support.");
 
   /* Clear the repartition flag. */
   e->forcerepart = 0;
@@ -1226,32 +1058,43 @@ void engine_repartition_trigger(struct engine *e) {
 
 #ifdef WITH_MPI
 
-  /* Do nothing if there have not been enough steps since the last
-   * repartition, don't want to repeat this too often or immediately after
-   * a repartition step. Also nothing to do when requested. */
+  const ticks tic = getticks();
+
+  /* Do nothing if there have not been enough steps since the last repartition
+   * as we don't want to repeat this too often or immediately after a
+   * repartition step. Also nothing to do when requested. */
   if (e->step - e->last_repartition >= 2 &&
       e->reparttype->type != REPART_NONE) {
 
-    /* Old style if trigger is >1 or this is the second step (want an early
-     * repartition following the initial repartition). */
-    if (e->reparttype->trigger > 1 || e->step == 2) {
+    /* If we have fixed costs available and this is step 2 or we are forcing
+     * repartitioning then we do a fixed costs one now. */
+    if (e->reparttype->trigger > 1 ||
+        (e->step == 2 && e->reparttype->use_fixed_costs)) {
+
       if (e->reparttype->trigger > 1) {
         if ((e->step % (int)e->reparttype->trigger) == 0) e->forcerepart = 1;
       } else {
         e->forcerepart = 1;
       }
+      e->reparttype->use_ticks = 0;
 
     } else {
 
-      /* Use cputimes from ranks to estimate the imbalance. */
-      /* First check if we are going to skip this stage anyway, if so do that
-       * now. If is only worth checking the CPU loads when we have processed a
-       * significant number of all particles. */
+      /* It is only worth checking the CPU loads when we have processed a
+       * significant number of all particles as we require all tasks to have
+       * timings. */
       if ((e->updates > 1 &&
            e->updates >= e->total_nr_parts * e->reparttype->minfrac) ||
           (e->g_updates > 1 &&
            e->g_updates >= e->total_nr_gparts * e->reparttype->minfrac)) {
 
+        /* Should we use the task timings or fixed costs? */
+        if (e->reparttype->use_fixed_costs > 1) {
+          e->reparttype->use_ticks = 0;
+        } else {
+          e->reparttype->use_ticks = 1;
+        }
+
         /* Get CPU time used since the last call to this function. */
         double elapsed_cputime =
             clocks_get_cputime_used() - e->cputime_last_step;
@@ -1274,17 +1117,22 @@ void engine_repartition_trigger(struct engine *e) {
           double mean = sum / (double)e->nr_nodes;
 
           /* Are we out of balance? */
-          if (((maxtime - mintime) / mean) > e->reparttype->trigger) {
+          double abs_trigger = fabs(e->reparttype->trigger);
+          if (((maxtime - mintime) / mean) > abs_trigger) {
             if (e->verbose)
-              message("trigger fraction %.3f exceeds %.3f will repartition",
-                      (maxtime - mintime) / mintime, e->reparttype->trigger);
+              message("trigger fraction %.3f > %.3f will repartition",
+                      (maxtime - mintime) / mean, abs_trigger);
             e->forcerepart = 1;
+          } else {
+            if (e->verbose)
+              message("trigger fraction %.3f =< %.3f will not repartition",
+                      (maxtime - mintime) / mean, abs_trigger);
           }
         }
-
-        /* All nodes do this together. */
-        MPI_Bcast(&e->forcerepart, 1, MPI_INT, 0, MPI_COMM_WORLD);
       }
+
+      /* All nodes do this together. */
+      MPI_Bcast(&e->forcerepart, 1, MPI_INT, 0, MPI_COMM_WORLD);
     }
 
     /* Remember we did this. */
@@ -1294,98 +1142,38 @@ void engine_repartition_trigger(struct engine *e) {
   /* We always reset CPU time for next check, unless it will not be used. */
   if (e->reparttype->type != REPART_NONE)
     e->cputime_last_step = clocks_get_cputime_used();
+
+  if (e->verbose)
+    message("took %.3f %s", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 #endif
 }
 
 /**
- * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
+ * @brief Exchange cell structures with other nodes.
  *
  * @param e The #engine.
- * @param ci The sending #cell.
- * @param cj Dummy cell containing the nodeID of the receiving node.
- * @param t_xv The send_xv #task, if it has already been created.
- * @param t_rho The send_rho #task, if it has already been created.
- * @param t_gradient The send_gradient #task, if already created.
  */
-void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
-                                struct cell *cj, struct task *t_xv,
-                                struct task *t_rho, struct task *t_gradient) {
+void engine_exchange_cells(struct engine *e) {
 
 #ifdef WITH_MPI
-  struct link *l = NULL;
-  struct scheduler *s = &e->sched;
-  const int nodeID = cj->nodeID;
-
-  /* Check if any of the density tasks are for the target node. */
-  for (l = ci->density; l != NULL; l = l->next)
-    if (l->t->ci->nodeID == nodeID ||
-        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
-      break;
-
-  /* If so, attach send tasks. */
-  if (l != NULL) {
-
-    /* Create the tasks and their dependencies? */
-    if (t_xv == NULL) {
-
-      /* Create a tag for this cell. */
-      if (ci->tag < 0) cell_tag(ci);
-
-      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->tag, 0,
-                               ci, cj);
-      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho, ci->tag, 0,
-                                ci, cj);
-#ifdef EXTRA_HYDRO_LOOP
-      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
-                                     ci->tag, 0, ci, cj);
-#endif
-
-#ifdef EXTRA_HYDRO_LOOP
 
-      scheduler_addunlock(s, t_gradient, ci->super->kick2);
-
-      scheduler_addunlock(s, ci->super_hydro->extra_ghost, t_gradient);
-
-      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
-       * task. */
-      scheduler_addunlock(s, t_rho, ci->super_hydro->extra_ghost);
-
-      /* The send_rho task depends on the cell's ghost task. */
-      scheduler_addunlock(s, ci->super_hydro->ghost_out, t_rho);
-
-      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
-      scheduler_addunlock(s, t_xv, ci->super_hydro->ghost_in);
-
-#else
-      /* The send_rho task should unlock the super_hydro-cell's kick task. */
-      scheduler_addunlock(s, t_rho, ci->super->end_force);
-
-      /* The send_rho task depends on the cell's ghost task. */
-      scheduler_addunlock(s, ci->super_hydro->ghost_out, t_rho);
-
-      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
-      scheduler_addunlock(s, t_xv, ci->super_hydro->ghost_in);
+  const int with_gravity = e->policy & engine_policy_self_gravity;
+  const ticks tic = getticks();
 
-#endif
+  /* Exchange the cell structure with neighbouring ranks. */
+  proxy_cells_exchange(e->proxies, e->nr_proxies, e->s, with_gravity);
 
-      /* Drift before you send */
-      scheduler_addunlock(s, ci->super_hydro->drift_part, t_xv);
-    }
+  memuse_report("parts_foreign", sizeof(struct part) * e->s->size_parts_foreign);
 
-    /* Add them to the local cell. */
-    engine_addlink(e, &ci->send_xv, t_xv);
-    engine_addlink(e, &ci->send_rho, t_rho);
-#ifdef EXTRA_HYDRO_LOOP
-    engine_addlink(e, &ci->send_gradient, t_gradient);
-#endif
-  }
+  memuse_report("gparts_foreign",
+                sizeof(struct gpart) * e->s->size_gparts_foreign);
 
-  /* Recurse? */
-  if (ci->split)
-    for (int k = 0; k < 8; k++)
-      if (ci->progeny[k] != NULL)
-        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
-                                   t_gradient);
+  memuse_report("sparts_foreign",
+                sizeof(struct spart) * e->s->size_sparts_foreign);
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -1393,447 +1181,101 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
 }
 
 /**
- * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
+ * @brief Exchange straying particles with other nodes.
  *
  * @param e The #engine.
- * @param ci The sending #cell.
- * @param cj Dummy cell containing the nodeID of the receiving node.
- * @param t_grav The send_grav #task, if it has already been created.
+ * @param offset_parts The index in the parts array as of which the foreign
+ *        parts reside (i.e. the current number of local #part).
+ * @param ind_part The foreign #cell ID of each part.
+ * @param Npart The number of stray parts, contains the number of parts received
+ *        on return.
+ * @param offset_gparts The index in the gparts array as of which the foreign
+ *        parts reside (i.e. the current number of local #gpart).
+ * @param ind_gpart The foreign #cell ID of each gpart.
+ * @param Ngpart The number of stray gparts, contains the number of gparts
+ *        received on return.
+ * @param offset_sparts The index in the sparts array as of which the foreign
+ *        parts reside (i.e. the current number of local #spart).
+ * @param ind_spart The foreign #cell ID of each spart.
+ * @param Nspart The number of stray sparts, contains the number of sparts
+ *        received on return.
+ *
+ * Note that this function does not mess up the linkage between parts and
+ * gparts, i.e. the received particles have correct linkage.
  */
-void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
-                                  struct cell *cj, struct task *t_grav) {
+void engine_exchange_strays(struct engine *e, const size_t offset_parts,
+                            const int *ind_part, size_t *Npart,
+                            const size_t offset_gparts, const int *ind_gpart,
+                            size_t *Ngpart, const size_t offset_sparts,
+                            const int *ind_spart, size_t *Nspart) {
 
 #ifdef WITH_MPI
-  struct link *l = NULL;
-  struct scheduler *s = &e->sched;
-  const int nodeID = cj->nodeID;
-
-  /* Check if any of the gravity tasks are for the target node. */
-  for (l = ci->grav; l != NULL; l = l->next)
-    if (l->t->ci->nodeID == nodeID ||
-        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
-      break;
-
-  /* If so, attach send tasks. */
-  if (l != NULL) {
 
-    /* Create the tasks and their dependencies? */
-    if (t_grav == NULL) {
+  struct space *s = e->s;
+  ticks tic = getticks();
 
-      /* Create a tag for this cell. */
-      if (ci->tag < 0) cell_tag(ci);
+  /* Re-set the proxies. */
+  for (int k = 0; k < e->nr_proxies; k++) {
+    e->proxies[k].nr_parts_out = 0;
+    e->proxies[k].nr_gparts_out = 0;
+    e->proxies[k].nr_sparts_out = 0;
+  }
 
-      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart, ci->tag,
-                                 0, ci, cj);
+  /* Put the parts into the corresponding proxies. */
+  for (size_t k = 0; k < *Npart; k++) {
 
-      /* The sends should unlock the down pass. */
-      scheduler_addunlock(s, t_grav, ci->super_gravity->grav_down);
+    /* Ignore the particles we want to get rid of (inhibited, ...). */
+    if (ind_part[k] == -1) continue;
 
-      /* Drift before you send */
-      scheduler_addunlock(s, ci->super_gravity->drift_gpart, t_grav);
+    /* Get the target node and proxy ID. */
+    const int node_id = e->s->cells_top[ind_part[k]].nodeID;
+    if (node_id < 0 || node_id >= e->nr_nodes)
+      error("Bad node ID %i.", node_id);
+    const int pid = e->proxy_ind[node_id];
+    if (pid < 0) {
+      error(
+          "Do not have a proxy for the requested nodeID %i for part with "
+          "id=%lld, x=[%e,%e,%e].",
+          node_id, s->parts[offset_parts + k].id,
+          s->parts[offset_parts + k].x[0], s->parts[offset_parts + k].x[1],
+          s->parts[offset_parts + k].x[2]);
     }
 
-    /* Add them to the local cell. */
-    engine_addlink(e, &ci->send_grav, t_grav);
-  }
-
-  /* Recurse? */
-  if (ci->split)
-    for (int k = 0; k < 8; k++)
-      if (ci->progeny[k] != NULL)
-        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav);
+    /* Re-link the associated gpart with the buffer offset of the part. */
+    if (s->parts[offset_parts + k].gpart != NULL) {
+      s->parts[offset_parts + k].gpart->id_or_neg_offset =
+          -e->proxies[pid].nr_parts_out;
+    }
 
-#else
-  error("SWIFT was not compiled with MPI support.");
+#ifdef SWIFT_DEBUG_CHECKS
+    if (s->parts[offset_parts + k].time_bin == time_bin_inhibited)
+      error("Attempting to exchange an inhibited particle");
 #endif
-}
-
-/**
- * @brief Add send tasks for the time-step to a hierarchy of cells.
- *
- * @param e The #engine.
- * @param ci The sending #cell.
- * @param cj Dummy cell containing the nodeID of the receiving node.
- * @param t_ti The send_ti #task, if it has already been created.
- */
-void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
-                                   struct cell *cj, struct task *t_ti) {
-
-#ifdef WITH_MPI
-  struct link *l = NULL;
-  struct scheduler *s = &e->sched;
-  const int nodeID = cj->nodeID;
-
-  /* Check if any of the gravity tasks are for the target node. */
-  for (l = ci->grav; l != NULL; l = l->next)
-    if (l->t->ci->nodeID == nodeID ||
-        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
-      break;
-
-  /* Check whether instead any of the hydro tasks are for the target node. */
-  if (l == NULL)
-    for (l = ci->density; l != NULL; l = l->next)
-      if (l->t->ci->nodeID == nodeID ||
-          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
-        break;
-
-  /* If found anything, attach send tasks. */
-  if (l != NULL) {
-
-    /* Create the tasks and their dependencies? */
-    if (t_ti == NULL) {
-
-      /* Create a tag for this cell. */
-      if (ci->tag < 0) cell_tag(ci);
 
-      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend, ci->tag, 0,
-                               ci, cj);
-
-      /* The super-cell's timestep task should unlock the send_ti task. */
-      scheduler_addunlock(s, ci->super->timestep, t_ti);
-    }
-
-    /* Add them to the local cell. */
-    engine_addlink(e, &ci->send_ti, t_ti);
+    /* Load the part and xpart into the proxy. */
+    proxy_parts_load(&e->proxies[pid], &s->parts[offset_parts + k],
+                     &s->xparts[offset_parts + k], 1);
   }
 
-  /* Recurse? */
-  if (ci->split)
-    for (int k = 0; k < 8; k++)
-      if (ci->progeny[k] != NULL)
-        engine_addtasks_send_timestep(e, ci->progeny[k], cj, t_ti);
+  /* Put the sparts into the corresponding proxies. */
+  for (size_t k = 0; k < *Nspart; k++) {
 
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
+    /* Ignore the particles we want to get rid of (inhibited, ...). */
+    if (ind_spart[k] == -1) continue;
 
-/**
- * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
- *
- * @param e The #engine.
- * @param c The foreign #cell.
- * @param t_xv The recv_xv #task, if it has already been created.
- * @param t_rho The recv_rho #task, if it has already been created.
- * @param t_gradient The recv_gradient #task, if it has already been created.
- */
-void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
-                                struct task *t_xv, struct task *t_rho,
-                                struct task *t_gradient) {
-
-#ifdef WITH_MPI
-  struct scheduler *s = &e->sched;
-
-  /* Have we reached a level where there are any hydro tasks ? */
-  if (t_xv == NULL && c->density != NULL) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-    /* Make sure this cell has a valid tag. */
-    if (c->tag < 0) error("Trying to receive from untagged cell.");
-#endif  // SWIFT_DEBUG_CHECKS
-
-    /* Create the tasks. */
-    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->tag, 0, c,
-                             NULL);
-    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->tag, 0, c,
-                              NULL);
-#ifdef EXTRA_HYDRO_LOOP
-    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
-                                   c->tag, 0, c, NULL);
-#endif
-  }
-
-  c->recv_xv = t_xv;
-  c->recv_rho = t_rho;
-  c->recv_gradient = t_gradient;
-
-  /* Add dependencies. */
-  if (c->sorts != NULL) scheduler_addunlock(s, t_xv, c->sorts);
-
-  for (struct link *l = c->density; l != NULL; l = l->next) {
-    scheduler_addunlock(s, t_xv, l->t);
-    scheduler_addunlock(s, l->t, t_rho);
-  }
-#ifdef EXTRA_HYDRO_LOOP
-  for (struct link *l = c->gradient; l != NULL; l = l->next) {
-    scheduler_addunlock(s, t_rho, l->t);
-    scheduler_addunlock(s, l->t, t_gradient);
-  }
-  for (struct link *l = c->force; l != NULL; l = l->next)
-    scheduler_addunlock(s, t_gradient, l->t);
-#else
-  for (struct link *l = c->force; l != NULL; l = l->next)
-    scheduler_addunlock(s, t_rho, l->t);
-#endif
-
-  /* Recurse? */
-  if (c->split)
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL)
-        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient);
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
- * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
- *
- * @param e The #engine.
- * @param c The foreign #cell.
- * @param t_grav The recv_gpart #task, if it has already been created.
- */
-void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
-                                  struct task *t_grav) {
-
-#ifdef WITH_MPI
-  struct scheduler *s = &e->sched;
-
-  /* Have we reached a level where there are any gravity tasks ? */
-  if (t_grav == NULL && c->grav != NULL) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-    /* Make sure this cell has a valid tag. */
-    if (c->tag < 0) error("Trying to receive from untagged cell.");
-#endif  // SWIFT_DEBUG_CHECKS
-
-    /* Create the tasks. */
-    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart, c->tag, 0,
-                               c, NULL);
-  }
-
-  c->recv_grav = t_grav;
-
-  for (struct link *l = c->grav; l != NULL; l = l->next)
-    scheduler_addunlock(s, t_grav, l->t);
-
-  /* Recurse? */
-  if (c->split)
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL)
-        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav);
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
- * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
- *
- * @param e The #engine.
- * @param c The foreign #cell.
- * @param t_ti The recv_ti #task, if already been created.
- */
-void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
-                                   struct task *t_ti) {
-
-#ifdef WITH_MPI
-  struct scheduler *s = &e->sched;
-
-  /* Have we reached a level where there are any self/pair tasks ? */
-  if (t_ti == NULL && (c->grav != NULL || c->density != NULL)) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-    /* Make sure this cell has a valid tag. */
-    if (c->tag < 0) error("Trying to receive from untagged cell.");
-#endif  // SWIFT_DEBUG_CHECKS
-
-    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend, c->tag, 0, c,
-                             NULL);
-  }
-
-  c->recv_ti = t_ti;
-
-  for (struct link *l = c->grav; l != NULL; l = l->next)
-    scheduler_addunlock(s, l->t, t_ti);
-
-  for (struct link *l = c->force; l != NULL; l = l->next)
-    scheduler_addunlock(s, l->t, t_ti);
-
-  /* Recurse? */
-  if (c->split)
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL)
-        engine_addtasks_recv_timestep(e, c->progeny[k], t_ti);
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
- * @brief Exchange cell structures with other nodes.
- *
- * @param e The #engine.
- */
-void engine_exchange_cells(struct engine *e) {
-
-#ifdef WITH_MPI
-
-  struct space *s = e->s;
-  const int nr_proxies = e->nr_proxies;
-  const ticks tic = getticks();
-
-  /* Exchange the cell structure with neighbouring ranks. */
-  proxy_cells_exchange(e->proxies, e->nr_proxies, e->s);
-
-  /* Count the number of particles we need to import and re-allocate
-     the buffer if needed. */
-  size_t count_parts_in = 0, count_gparts_in = 0, count_sparts_in = 0;
-  for (int k = 0; k < nr_proxies; k++)
-    for (int j = 0; j < e->proxies[k].nr_cells_in; j++) {
-      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_hydro)
-        count_parts_in += e->proxies[k].cells_in[j]->count;
-      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_gravity)
-        count_gparts_in += e->proxies[k].cells_in[j]->gcount;
-      count_sparts_in += e->proxies[k].cells_in[j]->scount;
-    }
-  if (count_parts_in > s->size_parts_foreign) {
-    if (s->parts_foreign != NULL) free(s->parts_foreign);
-    s->size_parts_foreign = 1.1 * count_parts_in;
-    if (posix_memalign((void **)&s->parts_foreign, part_align,
-                       sizeof(struct part) * s->size_parts_foreign) != 0)
-      error("Failed to allocate foreign part data.");
-  }
-  memuse_report("parts_foreign", sizeof(struct part) * s->size_parts_foreign);
-
-  if (count_gparts_in > s->size_gparts_foreign) {
-    if (s->gparts_foreign != NULL) free(s->gparts_foreign);
-    s->size_gparts_foreign = 1.1 * count_gparts_in;
-    if (posix_memalign((void **)&s->gparts_foreign, gpart_align,
-                       sizeof(struct gpart) * s->size_gparts_foreign) != 0)
-      error("Failed to allocate foreign gpart data.");
-  }
-  memuse_report("gparts_foreign",
-                sizeof(struct gpart) * s->size_gparts_foreign);
-
-  if (count_sparts_in > s->size_sparts_foreign) {
-    if (s->sparts_foreign != NULL) free(s->sparts_foreign);
-    s->size_sparts_foreign = 1.1 * count_sparts_in;
-    if (posix_memalign((void **)&s->sparts_foreign, spart_align,
-                       sizeof(struct spart) * s->size_sparts_foreign) != 0)
-      error("Failed to allocate foreign spart data.");
-  }
-  memuse_report("sparts_foreign",
-                sizeof(struct spart) * s->size_sparts_foreign);
-
-  /* Unpack the cells and link to the particle data. */
-  struct part *parts = s->parts_foreign;
-  struct gpart *gparts = s->gparts_foreign;
-  struct spart *sparts = s->sparts_foreign;
-  for (int k = 0; k < nr_proxies; k++) {
-    for (int j = 0; j < e->proxies[k].nr_cells_in; j++) {
-
-      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_hydro) {
-        cell_link_parts(e->proxies[k].cells_in[j], parts);
-        parts = &parts[e->proxies[k].cells_in[j]->count];
-      }
-
-      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_gravity) {
-        cell_link_gparts(e->proxies[k].cells_in[j], gparts);
-        gparts = &gparts[e->proxies[k].cells_in[j]->gcount];
-      }
-
-      cell_link_sparts(e->proxies[k].cells_in[j], sparts);
-      sparts = &sparts[e->proxies[k].cells_in[j]->scount];
-    }
-  }
-  s->nr_parts_foreign = parts - s->parts_foreign;
-  s->nr_gparts_foreign = gparts - s->gparts_foreign;
-  s->nr_sparts_foreign = sparts - s->sparts_foreign;
-
-  if (e->verbose)
-    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
-            clocks_getunit());
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
- * @brief Exchange straying particles with other nodes.
- *
- * @param e The #engine.
- * @param offset_parts The index in the parts array as of which the foreign
- *        parts reside.
- * @param ind_part The foreign #cell ID of each part.
- * @param Npart The number of stray parts, contains the number of parts received
- *        on return.
- * @param offset_gparts The index in the gparts array as of which the foreign
- *        parts reside.
- * @param ind_gpart The foreign #cell ID of each gpart.
- * @param Ngpart The number of stray gparts, contains the number of gparts
- *        received on return.
- * @param offset_sparts The index in the sparts array as of which the foreign
- *        parts reside.
- * @param ind_spart The foreign #cell ID of each spart.
- * @param Nspart The number of stray sparts, contains the number of sparts
- *        received on return.
- *
- * Note that this function does not mess-up the linkage between parts and
- * gparts, i.e. the received particles have correct linkeage.
- */
-void engine_exchange_strays(struct engine *e, size_t offset_parts,
-                            int *ind_part, size_t *Npart, size_t offset_gparts,
-                            int *ind_gpart, size_t *Ngpart,
-                            size_t offset_sparts, int *ind_spart,
-                            size_t *Nspart) {
-
-#ifdef WITH_MPI
-
-  struct space *s = e->s;
-  ticks tic = getticks();
-
-  /* Re-set the proxies. */
-  for (int k = 0; k < e->nr_proxies; k++) {
-    e->proxies[k].nr_parts_out = 0;
-    e->proxies[k].nr_gparts_out = 0;
-    e->proxies[k].nr_sparts_out = 0;
-  }
-
-  /* Put the parts into the corresponding proxies. */
-  for (size_t k = 0; k < *Npart; k++) {
     /* Get the target node and proxy ID. */
-    const int node_id = e->s->cells_top[ind_part[k]].nodeID;
-    if (node_id < 0 || node_id >= e->nr_nodes)
-      error("Bad node ID %i.", node_id);
-    const int pid = e->proxy_ind[node_id];
-    if (pid < 0) {
-      error(
-          "Do not have a proxy for the requested nodeID %i for part with "
-          "id=%lld, x=[%e,%e,%e].",
-          node_id, s->parts[offset_parts + k].id,
-          s->parts[offset_parts + k].x[0], s->parts[offset_parts + k].x[1],
-          s->parts[offset_parts + k].x[2]);
-    }
-
-    /* Re-link the associated gpart with the buffer offset of the part. */
-    if (s->parts[offset_parts + k].gpart != NULL) {
-      s->parts[offset_parts + k].gpart->id_or_neg_offset =
-          -e->proxies[pid].nr_parts_out;
-    }
-
-    /* Load the part and xpart into the proxy. */
-    proxy_parts_load(&e->proxies[pid], &s->parts[offset_parts + k],
-                     &s->xparts[offset_parts + k], 1);
-  }
-
-  /* Put the sparts into the corresponding proxies. */
-  for (size_t k = 0; k < *Nspart; k++) {
     const int node_id = e->s->cells_top[ind_spart[k]].nodeID;
     if (node_id < 0 || node_id >= e->nr_nodes)
       error("Bad node ID %i.", node_id);
     const int pid = e->proxy_ind[node_id];
-    if (pid < 0)
+    if (pid < 0) {
       error(
           "Do not have a proxy for the requested nodeID %i for part with "
           "id=%lld, x=[%e,%e,%e].",
           node_id, s->sparts[offset_sparts + k].id,
           s->sparts[offset_sparts + k].x[0], s->sparts[offset_sparts + k].x[1],
           s->sparts[offset_sparts + k].x[2]);
+    }
 
     /* Re-link the associated gpart with the buffer offset of the spart. */
     if (s->sparts[offset_sparts + k].gpart != NULL) {
@@ -1841,23 +1283,39 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
           -e->proxies[pid].nr_sparts_out;
     }
 
+#ifdef SWIFT_DEBUG_CHECKS
+    if (s->sparts[offset_sparts + k].time_bin == time_bin_inhibited)
+      error("Attempting to exchange an inhibited particle");
+#endif
+
     /* Load the spart into the proxy */
     proxy_sparts_load(&e->proxies[pid], &s->sparts[offset_sparts + k], 1);
   }
 
   /* Put the gparts into the corresponding proxies. */
   for (size_t k = 0; k < *Ngpart; k++) {
+
+    /* Ignore the particles we want to get rid of (inhibited, ...). */
+    if (ind_gpart[k] == -1) continue;
+
+    /* Get the target node and proxy ID. */
     const int node_id = e->s->cells_top[ind_gpart[k]].nodeID;
     if (node_id < 0 || node_id >= e->nr_nodes)
       error("Bad node ID %i.", node_id);
     const int pid = e->proxy_ind[node_id];
-    if (pid < 0)
+    if (pid < 0) {
       error(
           "Do not have a proxy for the requested nodeID %i for part with "
           "id=%lli, x=[%e,%e,%e].",
           node_id, s->gparts[offset_gparts + k].id_or_neg_offset,
           s->gparts[offset_gparts + k].x[0], s->gparts[offset_gparts + k].x[1],
           s->gparts[offset_gparts + k].x[2]);
+    }
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (s->gparts[offset_gparts + k].time_bin == time_bin_inhibited)
+      error("Attempting to exchange an inhibited particle");
+#endif
 
     /* Load the gpart into the proxy */
     proxy_gparts_load(&e->proxies[pid], &s->gparts[offset_gparts + k], 1);
@@ -1920,6 +1378,8 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
     free(s->xparts);
     s->parts = parts_new;
     s->xparts = xparts_new;
+
+    /* Reset the links */
     for (size_t k = 0; k < offset_parts; k++) {
       if (s->parts[k].gpart != NULL) {
         s->parts[k].gpart->id_or_neg_offset = -k;
@@ -1939,6 +1399,8 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
     memcpy(sparts_new, s->sparts, sizeof(struct spart) * offset_sparts);
     free(s->sparts);
     s->sparts = sparts_new;
+
+    /* Reset the links */
     for (size_t k = 0; k < offset_sparts; k++) {
       if (s->sparts[k].gpart != NULL) {
         s->sparts[k].gpart->id_or_neg_offset = -k;
@@ -1958,10 +1420,11 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
     free(s->gparts);
     s->gparts = gparts_new;
 
+    /* Reset the links */
     for (size_t k = 0; k < offset_gparts; k++) {
       if (s->gparts[k].type == swift_type_gas) {
         s->parts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
-      } else if (s->gparts[k].type == swift_type_star) {
+      } else if (s->gparts[k].type == swift_type_stars) {
         s->sparts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
       }
     }
@@ -2058,7 +1521,7 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
               &s->parts[offset_parts + count_parts - gp->id_or_neg_offset];
           gp->id_or_neg_offset = s->parts - p;
           p->gpart = gp;
-        } else if (gp->type == swift_type_star) {
+        } else if (gp->type == swift_type_stars) {
           struct spart *sp =
               &s->sparts[offset_sparts + count_sparts - gp->id_or_neg_offset];
           gp->id_or_neg_offset = s->sparts - sp;
@@ -2103,6 +1566,8 @@ void engine_exchange_top_multipoles(struct engine *e) {
 
 #ifdef WITH_MPI
 
+  ticks tic = getticks();
+
 #ifdef SWIFT_DEBUG_CHECKS
   for (int i = 0; i < e->s->nr_cells; ++i) {
     const struct gravity_tensors *m = &e->s->multipoles_top[i];
@@ -2129,19 +1594,16 @@ void engine_exchange_top_multipoles(struct engine *e) {
   /* Each node (space) has constructed its own top-level multipoles.
    * We now need to make sure every other node has a copy of everything.
    *
-   * WARNING: Adult stuff ahead: don't do this at home!
-   *
-   * Since all nodes have their top-level multi-poles computed
-   * and all foreign ones set to 0 (all bytes), we can gather all the m-poles
-   * by doing a bit-wise OR reduction across all the nodes directly in
-   * place inside the multi-poles_top array.
-   * This only works if the foreign m-poles on every nodes are zeroed and no
-   * multi-pole is present on more than one node (two things guaranteed by the
-   * domain decomposition).
+   * We use our home-made reduction operation that simply performs an XOR
+   * operation on the multipoles. Since only local multipoles are non-zero and
+   * each multipole is only present once, the bit-by-bit XOR will
+   * create the desired result.
    */
-  MPI_Allreduce(MPI_IN_PLACE, e->s->multipoles_top,
-                e->s->nr_cells * sizeof(struct gravity_tensors), MPI_BYTE,
-                MPI_BOR, MPI_COMM_WORLD);
+  int err = MPI_Allreduce(MPI_IN_PLACE, e->s->multipoles_top, e->s->nr_cells,
+                          multipole_mpi_type, multipole_mpi_reduce_op,
+                          MPI_COMM_WORLD);
+  if (err != MPI_SUCCESS)
+    mpi_error(err, "Failed to all-reduce the top-level multipoles.");
 
 #ifdef SWIFT_DEBUG_CHECKS
   long long counter = 0;
@@ -2150,6 +1612,9 @@ void engine_exchange_top_multipoles(struct engine *e) {
   for (int i = 0; i < e->s->nr_cells; ++i) {
     const struct gravity_tensors *m = &e->s->multipoles_top[i];
     counter += m->m_pole.num_gpart;
+    if (m->m_pole.num_gpart < 0) {
+      error("m->m_pole.num_gpart is negative: %lld", m->m_pole.num_gpart);
+    }
     if (m->m_pole.M_000 > 0.) {
       if (m->CoM[0] < 0. || m->CoM[0] > e->s->dim[0])
         error("Invalid multipole position in X");
@@ -2160,9 +1625,15 @@ void engine_exchange_top_multipoles(struct engine *e) {
     }
   }
   if (counter != e->total_nr_gparts)
-    error("Total particles in multipoles inconsistent with engine");
+    error(
+        "Total particles in multipoles inconsistent with engine.\n "
+        "  counter = %lld, nr_gparts = %lld",
+        counter, e->total_nr_gparts);
 #endif
 
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 #else
   error("SWIFT was not compiled with MPI support.");
 #endif
@@ -2175,8 +1646,8 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
   const ticks tic = getticks();
 
   /* Start by counting the number of cells to send and receive */
-  int count_send = 0;
-  int count_recv = 0;
+  int count_send_cells = 0;
+  int count_recv_cells = 0;
   int count_send_requests = 0;
   int count_recv_requests = 0;
 
@@ -2192,21 +1663,21 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
     /* And the actual number of things we are going to ship */
     for (int k = 0; k < p->nr_cells_in; k++)
-      count_recv += p->cells_in[k]->pcell_size;
+      count_recv_cells += p->cells_in[k]->mpi.pcell_size;
 
     for (int k = 0; k < p->nr_cells_out; k++)
-      count_send += p->cells_out[k]->pcell_size;
+      count_send_cells += p->cells_out[k]->mpi.pcell_size;
   }
 
   /* Allocate the buffers for the packed data */
   struct gravity_tensors *buffer_send = NULL;
   if (posix_memalign((void **)&buffer_send, SWIFT_CACHE_ALIGNMENT,
-                     count_send * sizeof(struct gravity_tensors)) != 0)
+                     count_send_cells * sizeof(struct gravity_tensors)) != 0)
     error("Unable to allocate memory for multipole transactions");
 
   struct gravity_tensors *buffer_recv = NULL;
   if (posix_memalign((void **)&buffer_recv, SWIFT_CACHE_ALIGNMENT,
-                     count_recv * sizeof(struct gravity_tensors)) != 0)
+                     count_recv_cells * sizeof(struct gravity_tensors)) != 0)
     error("Unable to allocate memory for multipole transactions");
 
   /* Also allocate the MPI requests */
@@ -2227,12 +1698,11 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
     for (int k = 0; k < p->nr_cells_in; k++) {
 
-      const int num_elements = p->cells_in[k]->pcell_size;
+      const int num_elements = p->cells_in[k]->mpi.pcell_size;
 
       /* Receive everything */
-      MPI_Irecv(&buffer_recv[this_recv],
-                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
-                p->cells_in[k]->nodeID, p->cells_in[k]->tag, MPI_COMM_WORLD,
+      MPI_Irecv(&buffer_recv[this_recv], num_elements, multipole_mpi_type,
+                p->cells_in[k]->nodeID, p->cells_in[k]->mpi.tag, MPI_COMM_WORLD,
                 &requests[this_request]);
 
       /* Move to the next slot in the buffers */
@@ -2244,17 +1714,16 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
     for (int k = 0; k < p->nr_cells_out; k++) {
 
       /* Number of multipoles in this cell hierarchy */
-      const int num_elements = p->cells_out[k]->pcell_size;
+      const int num_elements = p->cells_out[k]->mpi.pcell_size;
 
       /* Let's pack everything recursively */
       cell_pack_multipoles(p->cells_out[k], &buffer_send[this_send]);
 
       /* Send everything (note the use of cells_in[0] to get the correct node
        * ID. */
-      MPI_Isend(&buffer_send[this_send],
-                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
-                p->cells_in[0]->nodeID, p->cells_out[k]->tag, MPI_COMM_WORLD,
-                &requests[this_request]);
+      MPI_Isend(&buffer_send[this_send], num_elements, multipole_mpi_type,
+                p->cells_in[0]->nodeID, p->cells_out[k]->mpi.tag,
+                MPI_COMM_WORLD, &requests[this_request]);
 
       /* Move to the next slot in the buffers */
       this_send += num_elements;
@@ -2284,17 +1753,17 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
     for (int k = 0; k < p->nr_cells_in; k++) {
 
-      const int num_elements = p->cells_in[k]->pcell_size;
+      const int num_elements = p->cells_in[k]->mpi.pcell_size;
 
 #ifdef SWIFT_DEBUG_CHECKS
 
       /* Check that the first element (top-level cell's multipole) matches what
        * we received */
-      if (p->cells_in[k]->multipole->m_pole.num_gpart !=
+      if (p->cells_in[k]->grav.multipole->m_pole.num_gpart !=
           buffer_recv[this_recv].m_pole.num_gpart)
         error("Current: M_000=%e num_gpart=%lld\n New: M_000=%e num_gpart=%lld",
-              p->cells_in[k]->multipole->m_pole.M_000,
-              p->cells_in[k]->multipole->m_pole.num_gpart,
+              p->cells_in[k]->grav.multipole->m_pole.M_000,
+              p->cells_in[k]->grav.multipole->m_pole.num_gpart,
               buffer_recv[this_recv].m_pole.M_000,
               buffer_recv[this_recv].m_pole.num_gpart);
 #endif
@@ -2323,1470 +1792,163 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 }
 
 /**
- * @brief Constructs the top-level tasks for the short-range gravity
- * and long-range gravity interactions.
+ * @brief Allocate memory for the foreign particles.
+ *
+ * We look into the proxies for cells that have tasks and count
+ * the number of particles in these cells. We then allocate
+ * memory and link all the cells that have tasks and all cells
+ * deeper in the tree.
  *
- * - All top-cells get a self task.
- * - All pairs within range according to the multipole acceptance
- *   criterion get a pair task.
+ * @param e The #engine.
  */
-void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
-                                           void *extra_data) {
+void engine_allocate_foreign_particles(struct engine *e) {
+
+#ifdef WITH_MPI
 
-  struct engine *e = ((struct engine **)extra_data)[0];
+  const int nr_proxies = e->nr_proxies;
   struct space *s = e->s;
-  struct scheduler *sched = &e->sched;
-  const int nodeID = e->nodeID;
-  const int periodic = s->periodic;
-  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
-  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
-  struct cell *cells = s->cells_top;
-  const double theta_crit = e->gravity_properties->theta_crit;
-  const double max_distance = e->mesh->r_cut_max;
+  ticks tic = getticks();
 
-  /* Compute how many cells away we need to walk */
-  const double distance = 2.5 * cells[0].width[0] / theta_crit;
-  int delta = (int)(distance / cells[0].width[0]) + 1;
-  int delta_m = delta;
-  int delta_p = delta;
+  /* Count the number of particles we need to import and re-allocate
+     the buffer if needed. */
+  size_t count_parts_in = 0, count_gparts_in = 0, count_sparts_in = 0;
+  for (int k = 0; k < nr_proxies; k++) {
+    for (int j = 0; j < e->proxies[k].nr_cells_in; j++) {
 
-  /* Special case where every cell is in range of every other one */
-  if (delta >= cdim[0] / 2) {
-    if (cdim[0] % 2 == 0) {
-      delta_m = cdim[0] / 2;
-      delta_p = cdim[0] / 2 - 1;
-    } else {
-      delta_m = cdim[0] / 2;
-      delta_p = cdim[0] / 2;
+      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_hydro) {
+        count_parts_in += cell_count_parts_for_tasks(e->proxies[k].cells_in[j]);
+      }
+
+      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_gravity) {
+        count_gparts_in +=
+            cell_count_gparts_for_tasks(e->proxies[k].cells_in[j]);
+      }
+
+      /* For stars, we just use the numbers in the top-level cells */
+      count_sparts_in += e->proxies[k].cells_in[j]->stars.count;
     }
   }
 
-  /* Loop through the elements, which are just byte offsets from NULL. */
-  for (int ind = 0; ind < num_elements; ind++) {
-
-    /* Get the cell index. */
-    const int cid = (size_t)(map_data) + ind;
-
-    /* Integer indices of the cell in the top-level grid */
-    const int i = cid / (cdim[1] * cdim[2]);
-    const int j = (cid / cdim[2]) % cdim[1];
-    const int k = cid % cdim[2];
-
-    /* Get the cell */
-    struct cell *ci = &cells[cid];
-
-    /* Skip cells without gravity particles */
-    if (ci->gcount == 0) continue;
-
-    /* Is that cell local ? */
-    if (ci->nodeID != nodeID) continue;
-
-    /* If the cells is local build a self-interaction */
-    scheduler_addtask(sched, task_type_self, task_subtype_grav, 0, 0, ci, NULL);
-
-    /* Recover the multipole information */
-    const struct gravity_tensors *const multi_i = ci->multipole;
-    const double CoM_i[3] = {multi_i->CoM[0], multi_i->CoM[1], multi_i->CoM[2]};
-
-#ifdef SWIFT_DEBUG_CHECKS
-    if (cell_getid(cdim, i, j, k) != cid)
-      error("Incorrect calculation of indices (i,j,k)=(%d,%d,%d) cid=%d", i, j,
-            k, cid);
-
-    if (multi_i->r_max != multi_i->r_max_rebuild)
-      error(
-          "Multipole size not equal ot it's size after rebuild. But we just "
-          "rebuilt...");
-#endif
-
-    /* Loop over every other cell within (Manhattan) range delta */
-    for (int x = -delta_m; x <= delta_p; x++) {
-      int ii = i + x;
-      if (ii >= cdim[0])
-        ii -= cdim[0];
-      else if (ii < 0)
-        ii += cdim[0];
-      for (int y = -delta_m; y <= delta_p; y++) {
-        int jj = j + y;
-        if (jj >= cdim[1])
-          jj -= cdim[1];
-        else if (jj < 0)
-          jj += cdim[1];
-        for (int z = -delta_m; z <= delta_p; z++) {
-          int kk = k + z;
-          if (kk >= cdim[2])
-            kk -= cdim[2];
-          else if (kk < 0)
-            kk += cdim[2];
-
-          /* Get the cell */
-          const int cjd = cell_getid(cdim, ii, jj, kk);
-          struct cell *cj = &cells[cjd];
-
-#ifdef SWIFT_DEBUG_CHECKS
-          const int iii = cjd / (cdim[1] * cdim[2]);
-          const int jjj = (cjd / cdim[2]) % cdim[1];
-          const int kkk = cjd % cdim[2];
-
-          if (ii != iii || jj != jjj || kk != kkk)
-            error(
-                "Incorrect calculation of indices (iii,jjj,kkk)=(%d,%d,%d) "
-                "cjd=%d",
-                iii, jjj, kkk, cjd);
-#endif
-
-          /* Avoid duplicates of local pairs*/
-          if (cid <= cjd && cj->nodeID == nodeID) continue;
-
-          /* Skip cells without gravity particles */
-          if (cj->gcount == 0) continue;
-
-          /* Recover the multipole information */
-          const struct gravity_tensors *const multi_j = cj->multipole;
-
-          /* Get the distance between the CoMs */
-          double dx = CoM_i[0] - multi_j->CoM[0];
-          double dy = CoM_i[1] - multi_j->CoM[1];
-          double dz = CoM_i[2] - multi_j->CoM[2];
-
-          /* Apply BC */
-          if (periodic) {
-            dx = nearest(dx, dim[0]);
-            dy = nearest(dy, dim[1]);
-            dz = nearest(dz, dim[2]);
-          }
-          const double r2 = dx * dx + dy * dy + dz * dz;
-
-          /* Minimal distance between any pair of particles */
-          const double min_radius =
-              sqrt(r2) - (multi_i->r_max + multi_j->r_max);
-
-          /* Are we beyond the distance where the truncated forces are 0 ?*/
-          if (periodic && min_radius > max_distance) continue;
-
-          /* Are the cells too close for a MM interaction ? */
-          if (!cell_can_use_pair_mm(ci, cj, e, s)) {
-
-            /* Ok, we need to add a direct pair calculation */
-            scheduler_addtask(sched, task_type_pair, task_subtype_grav, 0, 0,
-                              ci, cj);
-          }
-        }
-      }
-    }
-  }
-}
-
-/**
- * @brief Constructs the top-level tasks for the short-range gravity
- * interactions (master function).
- *
- * - Create the FFT task and the array of gravity ghosts.
- * - Call the mapper function to create the other tasks.
- *
- * @param e The #engine.
- */
-void engine_make_self_gravity_tasks(struct engine *e) {
-
-  struct space *s = e->s;
-  struct task **ghosts = NULL;
-
-  /* Create the multipole self and pair tasks. */
-  void *extra_data[2] = {e, ghosts};
-  threadpool_map(&e->threadpool, engine_make_self_gravity_tasks_mapper, NULL,
-                 s->nr_cells, 1, 0, extra_data);
-}
-
-/**
- * @brief Constructs the top-level tasks for the external gravity.
- *
- * @param e The #engine.
- */
-void engine_make_external_gravity_tasks(struct engine *e) {
-
-  struct space *s = e->s;
-  struct scheduler *sched = &e->sched;
-  const int nodeID = e->nodeID;
-  struct cell *cells = s->cells_top;
-  const int nr_cells = s->nr_cells;
-
-  for (int cid = 0; cid < nr_cells; ++cid) {
-
-    struct cell *ci = &cells[cid];
-
-    /* Skip cells without gravity particles */
-    if (ci->gcount == 0) continue;
-
-    /* Is that neighbour local ? */
-    if (ci->nodeID != nodeID) continue;
-
-    /* If the cell is local, build a self-interaction */
-    scheduler_addtask(sched, task_type_self, task_subtype_external_grav, 0, 0,
-                      ci, NULL);
-  }
-}
-
-/**
- * @brief Constructs the top-level pair tasks for the first hydro loop over
- * neighbours
- *
- * Here we construct all the tasks for all possible neighbouring non-empty
- * local cells in the hierarchy. No dependencies are being added thus far.
- * Additional loop over neighbours can later be added by simply duplicating
- * all the tasks created by this function.
- *
- * @param map_data Offset of first two indices disguised as a pointer.
- * @param num_elements Number of cells to traverse.
- * @param extra_data The #engine.
- */
-void engine_make_hydroloop_tasks_mapper(void *map_data, int num_elements,
-                                        void *extra_data) {
-
-  /* Extract the engine pointer. */
-  struct engine *e = (struct engine *)extra_data;
-
-  struct space *s = e->s;
-  struct scheduler *sched = &e->sched;
-  const int nodeID = e->nodeID;
-  const int *cdim = s->cdim;
-  struct cell *cells = s->cells_top;
-
-  /* Loop through the elements, which are just byte offsets from NULL. */
-  for (int ind = 0; ind < num_elements; ind++) {
-
-    /* Get the cell index. */
-    const int cid = (size_t)(map_data) + ind;
-    const int i = cid / (cdim[1] * cdim[2]);
-    const int j = (cid / cdim[2]) % cdim[1];
-    const int k = cid % cdim[2];
-
-    /* Get the cell */
-    struct cell *ci = &cells[cid];
-
-    /* Skip cells without hydro particles */
-    if (ci->count == 0) continue;
-
-    /* If the cells is local build a self-interaction */
-    if (ci->nodeID == nodeID)
-      scheduler_addtask(sched, task_type_self, task_subtype_density, 0, 0, ci,
-                        NULL);
-
-    /* Now loop over all the neighbours of this cell */
-    for (int ii = -1; ii < 2; ii++) {
-      int iii = i + ii;
-      if (!s->periodic && (iii < 0 || iii >= cdim[0])) continue;
-      iii = (iii + cdim[0]) % cdim[0];
-      for (int jj = -1; jj < 2; jj++) {
-        int jjj = j + jj;
-        if (!s->periodic && (jjj < 0 || jjj >= cdim[1])) continue;
-        jjj = (jjj + cdim[1]) % cdim[1];
-        for (int kk = -1; kk < 2; kk++) {
-          int kkk = k + kk;
-          if (!s->periodic && (kkk < 0 || kkk >= cdim[2])) continue;
-          kkk = (kkk + cdim[2]) % cdim[2];
-
-          /* Get the neighbouring cell */
-          const int cjd = cell_getid(cdim, iii, jjj, kkk);
-          struct cell *cj = &cells[cjd];
-
-          /* Is that neighbour local and does it have particles ? */
-          if (cid >= cjd || cj->count == 0 ||
-              (ci->nodeID != nodeID && cj->nodeID != nodeID))
-            continue;
-
-          /* Construct the pair task */
-          const int sid = sortlistID[(kk + 1) + 3 * ((jj + 1) + 3 * (ii + 1))];
-          scheduler_addtask(sched, task_type_pair, task_subtype_density, sid, 0,
-                            ci, cj);
-        }
-      }
-    }
-  }
-}
-
-/**
- * @brief Counts the tasks associated with one cell and constructs the links
- *
- * For each hydrodynamic and gravity task, construct the links with
- * the corresponding cell.  Similarly, construct the dependencies for
- * all the sorting tasks.
- */
-void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
-                                        void *extra_data) {
-
-  struct engine *e = (struct engine *)extra_data;
-  struct scheduler *const sched = &e->sched;
-
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct task *t = &((struct task *)map_data)[ind];
-
-    struct cell *ci = t->ci;
-    struct cell *cj = t->cj;
-    const enum task_types t_type = t->type;
-    const enum task_subtypes t_subtype = t->subtype;
-
-    /* Link sort tasks to all the higher sort task. */
-    if (t_type == task_type_sort) {
-      for (struct cell *finger = t->ci->parent; finger != NULL;
-           finger = finger->parent)
-        if (finger->sorts != NULL) scheduler_addunlock(sched, t, finger->sorts);
-    }
-
-    /* Link self tasks to cells. */
-    else if (t_type == task_type_self) {
-      atomic_inc(&ci->nr_tasks);
-
-      if (t_subtype == task_subtype_density) {
-        engine_addlink(e, &ci->density, t);
-      } else if (t_subtype == task_subtype_grav) {
-        engine_addlink(e, &ci->grav, t);
-      } else if (t_subtype == task_subtype_external_grav) {
-        engine_addlink(e, &ci->grav, t);
-      }
-
-      /* Link pair tasks to cells. */
-    } else if (t_type == task_type_pair) {
-      atomic_inc(&ci->nr_tasks);
-      atomic_inc(&cj->nr_tasks);
-
-      if (t_subtype == task_subtype_density) {
-        engine_addlink(e, &ci->density, t);
-        engine_addlink(e, &cj->density, t);
-      } else if (t_subtype == task_subtype_grav) {
-        engine_addlink(e, &ci->grav, t);
-        engine_addlink(e, &cj->grav, t);
-      }
-#ifdef SWIFT_DEBUG_CHECKS
-      else if (t_subtype == task_subtype_external_grav) {
-        error("Found a pair/external-gravity task...");
-      }
-#endif
-
-      /* Link sub-self tasks to cells. */
-    } else if (t_type == task_type_sub_self) {
-      atomic_inc(&ci->nr_tasks);
-
-      if (t_subtype == task_subtype_density) {
-        engine_addlink(e, &ci->density, t);
-      } else if (t_subtype == task_subtype_grav) {
-        engine_addlink(e, &ci->grav, t);
-      } else if (t_subtype == task_subtype_external_grav) {
-        engine_addlink(e, &ci->grav, t);
-      }
-
-      /* Link sub-pair tasks to cells. */
-    } else if (t_type == task_type_sub_pair) {
-      atomic_inc(&ci->nr_tasks);
-      atomic_inc(&cj->nr_tasks);
-
-      if (t_subtype == task_subtype_density) {
-        engine_addlink(e, &ci->density, t);
-        engine_addlink(e, &cj->density, t);
-      } else if (t_subtype == task_subtype_grav) {
-        engine_addlink(e, &ci->grav, t);
-        engine_addlink(e, &cj->grav, t);
-      }
-#ifdef SWIFT_DEBUG_CHECKS
-      else if (t_subtype == task_subtype_external_grav) {
-        error("Found a sub-pair/external-gravity task...");
-      }
-#endif
-
-      /* Note that we do not need to link the M-M tasks */
-      /* since we already did so when splitting the gravity */
-      /* tasks. */
-    }
-  }
-}
-
-/**
- * @brief Creates all the task dependencies for the gravity
- *
- * @param e The #engine
- */
-void engine_link_gravity_tasks(struct engine *e) {
-
-  struct scheduler *sched = &e->sched;
-  const int nodeID = e->nodeID;
-  const int nr_tasks = sched->nr_tasks;
-
-  for (int k = 0; k < nr_tasks; k++) {
-
-    /* Get a pointer to the task. */
-    struct task *t = &sched->tasks[k];
-
-    if (t->type == task_type_none) continue;
-
-    /* Get the cells we act on */
-    struct cell *ci = t->ci;
-    struct cell *cj = t->cj;
-    const enum task_types t_type = t->type;
-    const enum task_subtypes t_subtype = t->subtype;
-
-/* Node ID (if running with MPI) */
-#ifdef WITH_MPI
-    const int ci_nodeID = ci->nodeID;
-    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
-#else
-    const int ci_nodeID = nodeID;
-    const int cj_nodeID = nodeID;
-#endif
-
-    /* Self-interaction for self-gravity? */
-    if (t_type == task_type_self && t_subtype == task_subtype_grav) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (ci_nodeID != nodeID) error("Non-local self task");
-#endif
-
-      /* drift ---+-> gravity --> grav_down */
-      /* init  --/    */
-      scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-      scheduler_addunlock(sched, ci->init_grav_out, t);
-      scheduler_addunlock(sched, t, ci->grav_down_in);
-    }
-
-    /* Self-interaction for external gravity ? */
-    if (t_type == task_type_self && t_subtype == task_subtype_external_grav) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (ci_nodeID != nodeID) error("Non-local self task");
-#endif
-
-      /* drift -----> gravity --> end_force */
-      scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-      scheduler_addunlock(sched, t, ci->end_force);
-    }
-
-    /* Otherwise, pair interaction? */
-    else if (t_type == task_type_pair && t_subtype == task_subtype_grav) {
-
-      if (ci_nodeID == nodeID) {
-
-        /* drift ---+-> gravity --> grav_down */
-        /* init  --/    */
-        scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
-      }
-      if (cj_nodeID == nodeID) {
-
-        /* drift ---+-> gravity --> grav_down */
-        /* init  --/    */
-        if (ci->super_gravity != cj->super_gravity) /* Avoid double unlock */
-          scheduler_addunlock(sched, cj->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
-      }
-    }
-
-    /* Otherwise, sub-self interaction? */
-    else if (t_type == task_type_sub_self && t_subtype == task_subtype_grav) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (ci_nodeID != nodeID) error("Non-local sub-self task");
-#endif
-      /* drift ---+-> gravity --> grav_down */
-      /* init  --/    */
-      scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-      scheduler_addunlock(sched, ci->init_grav_out, t);
-      scheduler_addunlock(sched, t, ci->grav_down_in);
-    }
-
-    /* Sub-self-interaction for external gravity ? */
-    else if (t_type == task_type_sub_self &&
-             t_subtype == task_subtype_external_grav) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (ci_nodeID != nodeID) error("Non-local sub-self task");
-#endif
-
-      /* drift -----> gravity --> end_force */
-      scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-      scheduler_addunlock(sched, t, ci->end_force);
-    }
-
-    /* Otherwise, sub-pair interaction? */
-    else if (t_type == task_type_sub_pair && t_subtype == task_subtype_grav) {
-
-      if (ci_nodeID == nodeID) {
-
-        /* drift ---+-> gravity --> grav_down */
-        /* init  --/    */
-        scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
-      }
-      if (cj_nodeID == nodeID) {
-
-        /* drift ---+-> gravity --> grav_down */
-        /* init  --/    */
-        if (ci->super_gravity != cj->super_gravity) /* Avoid double unlock */
-          scheduler_addunlock(sched, cj->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
-      }
-    }
-
-    /* Otherwise M-M interaction? */
-    else if (t_type == task_type_grav_mm) {
-
-      if (ci_nodeID == nodeID) {
-
-        /* init -----> gravity --> grav_down */
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
-      }
-      if (cj_nodeID == nodeID) {
-
-        /* init -----> gravity --> grav_down */
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
-      }
-    }
-  }
-}
-
-#ifdef EXTRA_HYDRO_LOOP
-
-/**
- * @brief Creates the dependency network for the hydro tasks of a given cell.
- *
- * @param sched The #scheduler.
- * @param density The density task to link.
- * @param gradient The gradient task to link.
- * @param force The force task to link.
- * @param c The cell.
- * @param with_cooling Do we have a cooling task ?
- */
-static inline void engine_make_hydro_loops_dependencies(
-    struct scheduler *sched, struct task *density, struct task *gradient,
-    struct task *force, struct cell *c, int with_cooling) {
-
-  /* density loop --> ghost --> gradient loop --> extra_ghost */
-  /* extra_ghost --> force loop  */
-  scheduler_addunlock(sched, density, c->super_hydro->ghost_in);
-  scheduler_addunlock(sched, c->super_hydro->ghost_out, gradient);
-  scheduler_addunlock(sched, gradient, c->super_hydro->extra_ghost);
-  scheduler_addunlock(sched, c->super_hydro->extra_ghost, force);
-}
-
-#else
-
-/**
- * @brief Creates the dependency network for the hydro tasks of a given cell.
- *
- * @param sched The #scheduler.
- * @param density The density task to link.
- * @param force The force task to link.
- * @param c The cell.
- * @param with_cooling Are we running with cooling switched on ?
- */
-static inline void engine_make_hydro_loops_dependencies(struct scheduler *sched,
-                                                        struct task *density,
-                                                        struct task *force,
-                                                        struct cell *c,
-                                                        int with_cooling) {
-  /* density loop --> ghost --> force loop */
-  scheduler_addunlock(sched, density, c->super_hydro->ghost_in);
-  scheduler_addunlock(sched, c->super_hydro->ghost_out, force);
-}
-
-#endif
-/**
- * @brief Duplicates the first hydro loop and construct all the
- * dependencies for the hydro part
- *
- * This is done by looping over all the previously constructed tasks
- * and adding another task involving the same cells but this time
- * corresponding to the second hydro loop over neighbours.
- * With all the relevant tasks for a given cell available, we construct
- * all the dependencies for that cell.
- */
-void engine_make_extra_hydroloop_tasks_mapper(void *map_data, int num_elements,
-                                              void *extra_data) {
-
-  struct engine *e = (struct engine *)extra_data;
-  struct scheduler *sched = &e->sched;
-  const int nodeID = e->nodeID;
-  const int with_cooling = (e->policy & engine_policy_cooling);
-
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct task *t = &((struct task *)map_data)[ind];
-
-    /* Sort tasks depend on the drift of the cell. */
-    if (t->type == task_type_sort && t->ci->nodeID == engine_rank) {
-      scheduler_addunlock(sched, t->ci->super_hydro->drift_part, t);
-    }
-
-    /* Self-interaction? */
-    else if (t->type == task_type_self && t->subtype == task_subtype_density) {
-
-      /* Make the self-density tasks depend on the drift only. */
-      scheduler_addunlock(sched, t->ci->super_hydro->drift_part, t);
-
-#ifdef EXTRA_HYDRO_LOOP
-      /* Start by constructing the task for the second  and third hydro loop. */
-      struct task *t2 = scheduler_addtask(
-          sched, task_type_self, task_subtype_gradient, 0, 0, t->ci, NULL);
-      struct task *t3 = scheduler_addtask(
-          sched, task_type_self, task_subtype_force, 0, 0, t->ci, NULL);
-
-      /* Add the link between the new loops and the cell */
-      engine_addlink(e, &t->ci->gradient, t2);
-      engine_addlink(e, &t->ci->force, t3);
-
-      /* Now, build all the dependencies for the hydro */
-      engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->ci,
-                                           with_cooling);
-      scheduler_addunlock(sched, t3, t->ci->super->end_force);
-#else
-
-      /* Start by constructing the task for the second hydro loop */
-      struct task *t2 = scheduler_addtask(
-          sched, task_type_self, task_subtype_force, 0, 0, t->ci, NULL);
-
-      /* Add the link between the new loop and the cell */
-      engine_addlink(e, &t->ci->force, t2);
-
-      /* Now, build all the dependencies for the hydro */
-      engine_make_hydro_loops_dependencies(sched, t, t2, t->ci, with_cooling);
-      scheduler_addunlock(sched, t2, t->ci->super->end_force);
-#endif
-    }
-
-    /* Otherwise, pair interaction? */
-    else if (t->type == task_type_pair && t->subtype == task_subtype_density) {
-
-      /* Make all density tasks depend on the drift and the sorts. */
-      if (t->ci->nodeID == engine_rank)
-        scheduler_addunlock(sched, t->ci->super_hydro->drift_part, t);
-      scheduler_addunlock(sched, t->ci->super_hydro->sorts, t);
-      if (t->ci->super_hydro != t->cj->super_hydro) {
-        if (t->cj->nodeID == engine_rank)
-          scheduler_addunlock(sched, t->cj->super_hydro->drift_part, t);
-        scheduler_addunlock(sched, t->cj->super_hydro->sorts, t);
-      }
-
-#ifdef EXTRA_HYDRO_LOOP
-      /* Start by constructing the task for the second and third hydro loop */
-      struct task *t2 = scheduler_addtask(
-          sched, task_type_pair, task_subtype_gradient, 0, 0, t->ci, t->cj);
-      struct task *t3 = scheduler_addtask(
-          sched, task_type_pair, task_subtype_force, 0, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and both cells */
-      engine_addlink(e, &t->ci->gradient, t2);
-      engine_addlink(e, &t->cj->gradient, t2);
-      engine_addlink(e, &t->ci->force, t3);
-      engine_addlink(e, &t->cj->force, t3);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->ci,
-                                             with_cooling);
-        scheduler_addunlock(sched, t3, t->ci->super->end_force);
-      }
-      if (t->cj->nodeID == nodeID) {
-        if (t->ci->super_hydro != t->cj->super_hydro)
-          engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->cj,
-                                               with_cooling);
-        if (t->ci->super != t->cj->super)
-          scheduler_addunlock(sched, t3, t->cj->super->end_force);
-      }
-
-#else
-
-      /* Start by constructing the task for the second hydro loop */
-      struct task *t2 = scheduler_addtask(
-          sched, task_type_pair, task_subtype_force, 0, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and both cells */
-      engine_addlink(e, &t->ci->force, t2);
-      engine_addlink(e, &t->cj->force, t2);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t->ci, with_cooling);
-        scheduler_addunlock(sched, t2, t->ci->super->end_force);
-      }
-      if (t->cj->nodeID == nodeID) {
-        if (t->ci->super_hydro != t->cj->super_hydro)
-          engine_make_hydro_loops_dependencies(sched, t, t2, t->cj,
-                                               with_cooling);
-        if (t->ci->super != t->cj->super)
-          scheduler_addunlock(sched, t2, t->cj->super->end_force);
-      }
-
-#endif
-
-    }
-
-    /* Otherwise, sub-self interaction? */
-    else if (t->type == task_type_sub_self &&
-             t->subtype == task_subtype_density) {
-
-      /* Make all density tasks depend on the drift and sorts. */
-      scheduler_addunlock(sched, t->ci->super_hydro->drift_part, t);
-      scheduler_addunlock(sched, t->ci->super_hydro->sorts, t);
-
-#ifdef EXTRA_HYDRO_LOOP
-
-      /* Start by constructing the task for the second and third hydro loop */
-      struct task *t2 =
-          scheduler_addtask(sched, task_type_sub_self, task_subtype_gradient,
-                            t->flags, 0, t->ci, t->cj);
-      struct task *t3 =
-          scheduler_addtask(sched, task_type_sub_self, task_subtype_force,
-                            t->flags, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and the cell */
-      engine_addlink(e, &t->ci->gradient, t2);
-      engine_addlink(e, &t->ci->force, t3);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->ci,
-                                             with_cooling);
-        scheduler_addunlock(sched, t3, t->ci->super->end_force);
-      }
-
-#else
-      /* Start by constructing the task for the second hydro loop */
-      struct task *t2 =
-          scheduler_addtask(sched, task_type_sub_self, task_subtype_force,
-                            t->flags, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and the cell */
-      engine_addlink(e, &t->ci->force, t2);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t->ci, with_cooling);
-        scheduler_addunlock(sched, t2, t->ci->super->end_force);
-      }
-#endif
-    }
-
-    /* Otherwise, sub-pair interaction? */
-    else if (t->type == task_type_sub_pair &&
-             t->subtype == task_subtype_density) {
-
-      /* Make all density tasks depend on the drift. */
-      if (t->ci->nodeID == engine_rank)
-        scheduler_addunlock(sched, t->ci->super_hydro->drift_part, t);
-      scheduler_addunlock(sched, t->ci->super_hydro->sorts, t);
-      if (t->ci->super_hydro != t->cj->super_hydro) {
-        if (t->cj->nodeID == engine_rank)
-          scheduler_addunlock(sched, t->cj->super_hydro->drift_part, t);
-        scheduler_addunlock(sched, t->cj->super_hydro->sorts, t);
-      }
-
-#ifdef EXTRA_HYDRO_LOOP
-
-      /* Start by constructing the task for the second and third hydro loop */
-      struct task *t2 =
-          scheduler_addtask(sched, task_type_sub_pair, task_subtype_gradient,
-                            t->flags, 0, t->ci, t->cj);
-      struct task *t3 =
-          scheduler_addtask(sched, task_type_sub_pair, task_subtype_force,
-                            t->flags, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and both cells */
-      engine_addlink(e, &t->ci->gradient, t2);
-      engine_addlink(e, &t->cj->gradient, t2);
-      engine_addlink(e, &t->ci->force, t3);
-      engine_addlink(e, &t->cj->force, t3);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->ci,
-                                             with_cooling);
-        scheduler_addunlock(sched, t3, t->ci->super->end_force);
-      }
-      if (t->cj->nodeID == nodeID) {
-        if (t->ci->super_hydro != t->cj->super_hydro)
-          engine_make_hydro_loops_dependencies(sched, t, t2, t3, t->cj,
-                                               with_cooling);
-        if (t->ci->super != t->cj->super)
-          scheduler_addunlock(sched, t3, t->cj->super->end_force);
-      }
-
-#else
-      /* Start by constructing the task for the second hydro loop */
-      struct task *t2 =
-          scheduler_addtask(sched, task_type_sub_pair, task_subtype_force,
-                            t->flags, 0, t->ci, t->cj);
-
-      /* Add the link between the new loop and both cells */
-      engine_addlink(e, &t->ci->force, t2);
-      engine_addlink(e, &t->cj->force, t2);
-
-      /* Now, build all the dependencies for the hydro for the cells */
-      /* that are local and are not descendant of the same super_hydro-cells */
-      if (t->ci->nodeID == nodeID) {
-        engine_make_hydro_loops_dependencies(sched, t, t2, t->ci, with_cooling);
-        scheduler_addunlock(sched, t2, t->ci->super->end_force);
-      }
-      if (t->cj->nodeID == nodeID) {
-        if (t->ci->super_hydro != t->cj->super_hydro)
-          engine_make_hydro_loops_dependencies(sched, t, t2, t->cj,
-                                               with_cooling);
-        if (t->ci->super != t->cj->super)
-          scheduler_addunlock(sched, t2, t->cj->super->end_force);
-      }
-#endif
-    }
-  }
-}
-
-/**
- * @brief Fill the #space's task list.
- *
- * @param e The #engine we are working with.
- */
-void engine_maketasks(struct engine *e) {
-
-  struct space *s = e->s;
-  struct scheduler *sched = &e->sched;
-  struct cell *cells = s->cells_top;
-  const int nr_cells = s->nr_cells;
-  const ticks tic = getticks();
-
-  /* Re-set the scheduler. */
-  scheduler_reset(sched, engine_estimate_nr_tasks(e));
-
-  ticks tic2 = getticks();
-
-  /* Construct the firt hydro loop over neighbours */
-  if (e->policy & engine_policy_hydro)
-    threadpool_map(&e->threadpool, engine_make_hydroloop_tasks_mapper, NULL,
-                   s->nr_cells, 1, 0, e);
-
-  if (e->verbose)
-    message("Making hydro tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  tic2 = getticks();
-
-  /* Add the self gravity tasks. */
-  if (e->policy & engine_policy_self_gravity) engine_make_self_gravity_tasks(e);
-
-  if (e->verbose)
-    message("Making gravity tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  /* Add the external gravity tasks. */
-  if (e->policy & engine_policy_external_gravity)
-    engine_make_external_gravity_tasks(e);
-
-  if (e->sched.nr_tasks == 0 && (s->nr_gparts > 0 || s->nr_parts > 0))
-    error("We have particles but no hydro or gravity tasks were created.");
-
-  /* Free the old list of cell-task links. */
-  if (e->links != NULL) free(e->links);
-  e->size_links = 0;
-
-/* The maximum number of links is the
- * number of cells (s->tot_cells) times the number of neighbours (26) times
- * the number of interaction types, so 26 * 2 (density, force) pairs
- * and 2 (density, force) self.
- */
-#ifdef EXTRA_HYDRO_LOOP
-  const size_t hydro_tasks_per_cell = 27 * 3;
-#else
-  const size_t hydro_tasks_per_cell = 27 * 2;
-#endif
-  const size_t self_grav_tasks_per_cell = 125;
-  const size_t ext_grav_tasks_per_cell = 1;
-
-  if (e->policy & engine_policy_hydro)
-    e->size_links += s->tot_cells * hydro_tasks_per_cell;
-  if (e->policy & engine_policy_external_gravity)
-    e->size_links += s->tot_cells * ext_grav_tasks_per_cell;
-  if (e->policy & engine_policy_self_gravity)
-    e->size_links += s->tot_cells * self_grav_tasks_per_cell;
-
-  /* Allocate the new link list */
-  if ((e->links = (struct link *)malloc(sizeof(struct link) * e->size_links)) ==
-      NULL)
-    error("Failed to allocate cell-task links.");
-  e->nr_links = 0;
-
-  tic2 = getticks();
-
-  /* Split the tasks. */
-  scheduler_splittasks(sched);
-
-  if (e->verbose)
-    message("Splitting tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-#ifdef SWIFT_DEBUG_CHECKS
-  /* Verify that we are not left with invalid tasks */
-  for (int i = 0; i < e->sched.nr_tasks; ++i) {
-    const struct task *t = &e->sched.tasks[i];
-    if (t->ci == NULL && t->cj != NULL && !t->skip) error("Invalid task");
-  }
-#endif
-
-  tic2 = getticks();
-
-  /* Count the number of tasks associated with each cell and
-     store the density tasks in each cell, and make each sort
-     depend on the sorts of its progeny. */
-  threadpool_map(&e->threadpool, engine_count_and_link_tasks_mapper,
-                 sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
-
-  if (e->verbose)
-    message("Counting and linking tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  tic2 = getticks();
-
-  /* Re-set the tag counter. MPI tags are defined for top-level cells in
-   * cell_set_super_mapper. */
-#ifdef WITH_MPI
-  cell_next_tag = 0;
-#endif
-
-  /* Now that the self/pair tasks are at the right level, set the super
-   * pointers. */
-  threadpool_map(&e->threadpool, cell_set_super_mapper, cells, nr_cells,
-                 sizeof(struct cell), 0, e);
-
   if (e->verbose)
-    message("Setting super-pointers took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  /* Append hierarchical tasks to each cell. */
-  threadpool_map(&e->threadpool, engine_make_hierarchical_tasks_mapper, cells,
-                 nr_cells, sizeof(struct cell), 0, e);
+    message("Counting number of foreign particles took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
 
-  tic2 = getticks();
+  tic = getticks();
 
-  /* Run through the tasks and make force tasks for each density task.
-     Each force task depends on the cell ghosts and unlocks the kick task
-     of its super-cell. */
-  if (e->policy & engine_policy_hydro)
-    threadpool_map(&e->threadpool, engine_make_extra_hydroloop_tasks_mapper,
-                   sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
+  /* Allocate space for the foreign particles we will receive */
+  if (count_parts_in > s->size_parts_foreign) {
+    if (s->parts_foreign != NULL) free(s->parts_foreign);
+    s->size_parts_foreign = engine_foreign_alloc_margin * count_parts_in;
+    if (posix_memalign((void **)&s->parts_foreign, part_align,
+                       sizeof(struct part) * s->size_parts_foreign) != 0)
+      error("Failed to allocate foreign part data.");
+  }
+  /* Allocate space for the foreign particles we will receive */
+  if (count_gparts_in > s->size_gparts_foreign) {
+    if (s->gparts_foreign != NULL) free(s->gparts_foreign);
+    s->size_gparts_foreign = engine_foreign_alloc_margin * count_gparts_in;
+    if (posix_memalign((void **)&s->gparts_foreign, gpart_align,
+                       sizeof(struct gpart) * s->size_gparts_foreign) != 0)
+      error("Failed to allocate foreign gpart data.");
+  }
+  /* Allocate space for the foreign particles we will receive */
+  if (count_sparts_in > s->size_sparts_foreign) {
+    if (s->sparts_foreign != NULL) free(s->sparts_foreign);
+    s->size_sparts_foreign = engine_foreign_alloc_margin * count_sparts_in;
+    if (posix_memalign((void **)&s->sparts_foreign, spart_align,
+                       sizeof(struct spart) * s->size_sparts_foreign) != 0)
+      error("Failed to allocate foreign spart data.");
+  }
 
   if (e->verbose)
-    message("Making extra hydroloop tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+    message("Allocating %zd/%zd/%zd foreign part/gpart/spart (%zd/%zd/%zd MB)",
+            s->size_parts_foreign, s->size_gparts_foreign,
+            s->size_sparts_foreign,
+            s->size_parts_foreign * sizeof(struct part) / (1024 * 1024),
+            s->size_gparts_foreign * sizeof(struct gpart) / (1024 * 1024),
+            s->size_sparts_foreign * sizeof(struct spart) / (1024 * 1024));
 
-  tic2 = getticks();
+  /* Unpack the cells and link to the particle data. */
+  struct part *parts = s->parts_foreign;
+  struct gpart *gparts = s->gparts_foreign;
+  struct spart *sparts = s->sparts_foreign;
+  for (int k = 0; k < nr_proxies; k++) {
+    for (int j = 0; j < e->proxies[k].nr_cells_in; j++) {
 
-  /* Add the dependencies for the gravity stuff */
-  if (e->policy & (engine_policy_self_gravity | engine_policy_external_gravity))
-    engine_link_gravity_tasks(e);
+      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_hydro) {
 
-  if (e->verbose)
-    message("Linking gravity tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+        const size_t count_parts =
+            cell_link_foreign_parts(e->proxies[k].cells_in[j], parts);
+        parts = &parts[count_parts];
+      }
 
-#ifdef WITH_MPI
+      if (e->proxies[k].cells_in_type[j] & proxy_cell_type_gravity) {
 
-  /* Add the communication tasks if MPI is being used. */
-  if (e->policy & engine_policy_mpi) {
-
-    /* Loop over the proxies and add the send tasks, which also generates the
-     * cell tags for super-cells. */
-    for (int pid = 0; pid < e->nr_proxies; pid++) {
-
-      /* Get a handle on the proxy. */
-      struct proxy *p = &e->proxies[pid];
-
-      for (int k = 0; k < p->nr_cells_out; k++)
-        engine_addtasks_send_timestep(e, p->cells_out[k], p->cells_in[0], NULL);
-
-      /* Loop through the proxy's outgoing cells and add the
-         send tasks for the cells in the proxy that have a hydro connection. */
-      if (e->policy & engine_policy_hydro)
-        for (int k = 0; k < p->nr_cells_out; k++)
-          if (p->cells_out_type[k] & proxy_cell_type_hydro)
-            engine_addtasks_send_hydro(e, p->cells_out[k], p->cells_in[0], NULL,
-                                       NULL, NULL);
-
-      /* Loop through the proxy's outgoing cells and add the
-         send tasks for the cells in the proxy that have a gravity connection.
-         */
-      if (e->policy & engine_policy_self_gravity)
-        for (int k = 0; k < p->nr_cells_out; k++)
-          if (p->cells_out_type[k] & proxy_cell_type_gravity)
-            engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0],
-                                         NULL);
-    }
+        const size_t count_gparts =
+            cell_link_foreign_gparts(e->proxies[k].cells_in[j], gparts);
+        gparts = &gparts[count_gparts];
+      }
 
-    /* Exchange the cell tags. */
-    proxy_tags_exchange(e->proxies, e->nr_proxies, s);
-
-    /* Loop over the proxies and add the recv tasks, which relies on having the
-     * cell tags. */
-    for (int pid = 0; pid < e->nr_proxies; pid++) {
-
-      /* Get a handle on the proxy. */
-      struct proxy *p = &e->proxies[pid];
-
-      for (int k = 0; k < p->nr_cells_in; k++)
-        engine_addtasks_recv_timestep(e, p->cells_in[k], NULL);
-
-      /* Loop through the proxy's incoming cells and add the
-         recv tasks for the cells in the proxy that have a hydro connection. */
-      if (e->policy & engine_policy_hydro)
-        for (int k = 0; k < p->nr_cells_in; k++)
-          if (p->cells_in_type[k] & proxy_cell_type_hydro)
-            engine_addtasks_recv_hydro(e, p->cells_in[k], NULL, NULL, NULL);
-
-      /* Loop through the proxy's incoming cells and add the
-         recv tasks for the cells in the proxy that have a gravity connection.
-         */
-      if (e->policy & engine_policy_self_gravity)
-        for (int k = 0; k < p->nr_cells_in; k++)
-          if (p->cells_in_type[k] & proxy_cell_type_gravity)
-            engine_addtasks_recv_gravity(e, p->cells_in[k], NULL);
+      /* For stars, we just use the numbers in the top-level cells */
+      cell_link_sparts(e->proxies[k].cells_in[j], sparts);
+      sparts = &sparts[e->proxies[k].cells_in[j]->stars.count];
     }
   }
-#endif
-
-  tic2 = getticks();
-
-  /* Set the unlocks per task. */
-  scheduler_set_unlocks(sched);
 
-  if (e->verbose)
-    message("Setting unlocks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  tic2 = getticks();
-
-  /* Rank the tasks. */
-  scheduler_ranktasks(sched);
-
-  if (e->verbose)
-    message("Ranking the tasks took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
-  /* Weight the tasks. */
-  scheduler_reweight(sched, e->verbose);
-
-  /* Set the tasks age. */
-  e->tasks_age = 0;
+  /* Update the counters */
+  s->nr_parts_foreign = parts - s->parts_foreign;
+  s->nr_gparts_foreign = gparts - s->gparts_foreign;
+  s->nr_sparts_foreign = sparts - s->sparts_foreign;
 
   if (e->verbose)
-    message("took %.3f %s (including reweight).",
+    message("Recursively linking foreign arrays took %.3f %s.",
             clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
 }
 
 /**
- * @brief Mark tasks to be un-skipped and set the sort flags accordingly.
- *        Threadpool mapper function.
+ * @brief Prints the number of tasks in the engine
  *
- * @param map_data pointer to the tasks
- * @param num_elements number of tasks
- * @param extra_data pointer to int that will define if a rebuild is needed.
+ * @param e The #engine.
  */
-void engine_marktasks_mapper(void *map_data, int num_elements,
-                             void *extra_data) {
-  /* Unpack the arguments. */
-  struct task *tasks = (struct task *)map_data;
-  size_t *rebuild_space = &((size_t *)extra_data)[1];
-  struct scheduler *s = (struct scheduler *)(((size_t *)extra_data)[2]);
-  struct engine *e = (struct engine *)((size_t *)extra_data)[0];
-
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct task *t = &tasks[ind];
-
-    /* Single-cell task? */
-    if (t->type == task_type_self || t->type == task_type_sub_self) {
-
-      /* Local pointer. */
-      struct cell *ci = t->ci;
-
-      if (ci->nodeID != engine_rank) error("Non-local self task found");
-
-      /* Activate the hydro drift */
-      if (t->type == task_type_self && t->subtype == task_subtype_density) {
-        if (cell_is_active_hydro(ci, e)) {
-          scheduler_activate(s, t);
-          cell_activate_drift_part(ci, s);
-        }
-      }
-
-      /* Store current values of dx_max and h_max. */
-      else if (t->type == task_type_sub_self &&
-               t->subtype == task_subtype_density) {
-        if (cell_is_active_hydro(ci, e)) {
-          scheduler_activate(s, t);
-          cell_activate_subcell_hydro_tasks(ci, NULL, s);
-        }
-      }
-
-      else if (t->type == task_type_self && t->subtype == task_subtype_force) {
-        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
-      }
-
-      else if (t->type == task_type_sub_self &&
-               t->subtype == task_subtype_force) {
-        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
-      }
-
-#ifdef EXTRA_HYDRO_LOOP
-      else if (t->type == task_type_self &&
-               t->subtype == task_subtype_gradient) {
-        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
-      }
-
-      else if (t->type == task_type_sub_self &&
-               t->subtype == task_subtype_gradient) {
-        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
-      }
-#endif
-
-      /* Activate the gravity drift */
-      else if (t->type == task_type_self && t->subtype == task_subtype_grav) {
-        if (cell_is_active_gravity(ci, e)) {
-          scheduler_activate(s, t);
-          cell_activate_subcell_grav_tasks(t->ci, NULL, s);
-        }
-      }
-
-      /* Activate the gravity drift */
-      else if (t->type == task_type_self &&
-               t->subtype == task_subtype_external_grav) {
-        if (cell_is_active_gravity(ci, e)) {
-          scheduler_activate(s, t);
-          cell_activate_drift_gpart(t->ci, s);
-        }
-      }
-
-#ifdef SWIFT_DEBUG_CHECKS
-      else {
-        error("Invalid task type / sub-type encountered");
-      }
-#endif
-    }
-
-    /* Pair? */
-    else if (t->type == task_type_pair || t->type == task_type_sub_pair) {
-
-      /* Local pointers. */
-      struct cell *ci = t->ci;
-      struct cell *cj = t->cj;
-      const int ci_active_hydro = cell_is_active_hydro(ci, e);
-      const int cj_active_hydro = cell_is_active_hydro(cj, e);
-      const int ci_active_gravity = cell_is_active_gravity(ci, e);
-      const int cj_active_gravity = cell_is_active_gravity(cj, e);
-
-      /* Only activate tasks that involve a local active cell. */
-      if ((t->subtype == task_subtype_density ||
-           t->subtype == task_subtype_gradient ||
-           t->subtype == task_subtype_force) &&
-          ((ci_active_hydro && ci->nodeID == engine_rank) ||
-           (cj_active_hydro && cj->nodeID == engine_rank))) {
-
-        scheduler_activate(s, t);
-
-        /* Set the correct sorting flags */
-        if (t->type == task_type_pair && t->subtype == task_subtype_density) {
-
-          /* Store some values. */
-          atomic_or(&ci->requires_sorts, 1 << t->flags);
-          atomic_or(&cj->requires_sorts, 1 << t->flags);
-          ci->dx_max_sort_old = ci->dx_max_sort;
-          cj->dx_max_sort_old = cj->dx_max_sort;
-
-          /* Activate the hydro drift tasks. */
-          if (ci->nodeID == engine_rank) cell_activate_drift_part(ci, s);
-          if (cj->nodeID == engine_rank) cell_activate_drift_part(cj, s);
-
-          /* Check the sorts and activate them if needed. */
-          cell_activate_sorts(ci, t->flags, s);
-          cell_activate_sorts(cj, t->flags, s);
-
-        }
-
-        /* Store current values of dx_max and h_max. */
-        else if (t->type == task_type_sub_pair &&
-                 t->subtype == task_subtype_density) {
-          cell_activate_subcell_hydro_tasks(t->ci, t->cj, s);
-        }
-      }
-
-      if ((t->subtype == task_subtype_grav) &&
-          ((ci_active_gravity && ci->nodeID == engine_rank) ||
-           (cj_active_gravity && cj->nodeID == engine_rank))) {
+void engine_print_task_counts(const struct engine *e) {
 
-        scheduler_activate(s, t);
-
-        if (t->type == task_type_pair && t->subtype == task_subtype_grav) {
-          /* Activate the gravity drift */
-          cell_activate_subcell_grav_tasks(t->ci, t->cj, s);
-        }
-
-        else if (t->type == task_type_sub_pair &&
-                 t->subtype == task_subtype_grav) {
-          error("Invalid task sub-type encountered");
-        }
-      }
-
-      /* Only interested in density tasks as of here. */
-      if (t->subtype == task_subtype_density) {
-
-        /* Too much particle movement? */
-        if (cell_need_rebuild_for_pair(ci, cj)) *rebuild_space = 1;
+  const ticks tic = getticks();
+  const struct scheduler *sched = &e->sched;
+  const int nr_tasks = sched->nr_tasks;
+  const struct task *const tasks = sched->tasks;
 
+  /* Global tasks and cells when using MPI. */
 #ifdef WITH_MPI
-        /* Activate the send/recv tasks. */
-        if (ci->nodeID != engine_rank) {
-
-          /* If the local cell is active, receive data from the foreign cell. */
-          if (cj_active_hydro) {
-            scheduler_activate(s, ci->recv_xv);
-            if (ci_active_hydro) {
-              scheduler_activate(s, ci->recv_rho);
-#ifdef EXTRA_HYDRO_LOOP
-              scheduler_activate(s, ci->recv_gradient);
-#endif
-            }
-          }
-
-          /* If the foreign cell is active, we want its ti_end values. */
-          if (ci_active_hydro) scheduler_activate(s, ci->recv_ti);
-
-          /* Is the foreign cell active and will need stuff from us? */
-          if (ci_active_hydro) {
-
-            struct link *l =
-                scheduler_activate_send(s, cj->send_xv, ci->nodeID);
-
-            /* Drift the cell which will be sent at the level at which it is
-               sent, i.e. drift the cell specified in the send task (l->t)
-               itself. */
-            cell_activate_drift_part(l->t->ci, s);
-
-            /* If the local cell is also active, more stuff will be needed. */
-            if (cj_active_hydro) {
-              scheduler_activate_send(s, cj->send_rho, ci->nodeID);
-
-#ifdef EXTRA_HYDRO_LOOP
-              scheduler_activate_send(s, cj->send_gradient, ci->nodeID);
-#endif
-            }
-          }
-
-          /* If the local cell is active, send its ti_end values. */
-          if (cj_active_hydro)
-            scheduler_activate_send(s, cj->send_ti, ci->nodeID);
-
-        } else if (cj->nodeID != engine_rank) {
-
-          /* If the local cell is active, receive data from the foreign cell. */
-          if (ci_active_hydro) {
-            scheduler_activate(s, cj->recv_xv);
-            if (cj_active_hydro) {
-              scheduler_activate(s, cj->recv_rho);
-#ifdef EXTRA_HYDRO_LOOP
-              scheduler_activate(s, cj->recv_gradient);
-#endif
-            }
-          }
-
-          /* If the foreign cell is active, we want its ti_end values. */
-          if (cj_active_hydro) scheduler_activate(s, cj->recv_ti);
-
-          /* Is the foreign cell active and will need stuff from us? */
-          if (cj_active_hydro) {
-
-            struct link *l =
-                scheduler_activate_send(s, ci->send_xv, cj->nodeID);
-
-            /* Drift the cell which will be sent at the level at which it is
-               sent, i.e. drift the cell specified in the send task (l->t)
-               itself. */
-            cell_activate_drift_part(l->t->ci, s);
-
-            /* If the local cell is also active, more stuff will be needed. */
-            if (ci_active_hydro) {
-
-              scheduler_activate_send(s, ci->send_rho, cj->nodeID);
-
-#ifdef EXTRA_HYDRO_LOOP
-              scheduler_activate_send(s, ci->send_gradient, cj->nodeID);
-#endif
-            }
-          }
-
-          /* If the local cell is active, send its ti_end values. */
-          if (ci_active_hydro)
-            scheduler_activate_send(s, ci->send_ti, cj->nodeID);
-        }
+  if (e->nodeID == 0 && e->total_nr_tasks > 0)
+    printf(
+        "[%04i] %s engine_print_task_counts: System total: %lld,"
+        " no. cells: %lld\n",
+        e->nodeID, clocks_get_timesincestart(), e->total_nr_tasks,
+        e->total_nr_cells);
+  fflush(stdout);
 #endif
-      }
 
-      /* Only interested in gravity tasks as of here. */
-      if (t->subtype == task_subtype_grav) {
+  /* Report value that can be used to estimate the task_per_cells parameter. */
+  float tasks_per_cell = (float)nr_tasks / (float)e->s->tot_cells;
 
 #ifdef WITH_MPI
-        /* Activate the send/recv tasks. */
-        if (ci->nodeID != engine_rank) {
-
-          /* If the local cell is active, receive data from the foreign cell. */
-          if (cj_active_gravity) {
-            scheduler_activate(s, ci->recv_grav);
-          }
-
-          /* If the foreign cell is active, we want its ti_end values. */
-          if (ci_active_gravity) scheduler_activate(s, ci->recv_ti);
-
-          /* Is the foreign cell active and will need stuff from us? */
-          if (ci_active_gravity) {
-
-            struct link *l =
-                scheduler_activate_send(s, cj->send_grav, ci->nodeID);
-
-            /* Drift the cell which will be sent at the level at which it is
-               sent, i.e. drift the cell specified in the send task (l->t)
-               itself. */
-            cell_activate_drift_gpart(l->t->ci, s);
-          }
-
-          /* If the local cell is active, send its ti_end values. */
-          if (cj_active_gravity)
-            scheduler_activate_send(s, cj->send_ti, ci->nodeID);
-
-        } else if (cj->nodeID != engine_rank) {
-
-          /* If the local cell is active, receive data from the foreign cell. */
-          if (ci_active_gravity) {
-            scheduler_activate(s, cj->recv_grav);
-          }
-
-          /* If the foreign cell is active, we want its ti_end values. */
-          if (cj_active_gravity) scheduler_activate(s, cj->recv_ti);
-
-          /* Is the foreign cell active and will need stuff from us? */
-          if (cj_active_gravity) {
-
-            struct link *l =
-                scheduler_activate_send(s, ci->send_grav, cj->nodeID);
-
-            /* Drift the cell which will be sent at the level at which it is
-               sent, i.e. drift the cell specified in the send task (l->t)
-               itself. */
-            cell_activate_drift_gpart(l->t->ci, s);
-          }
-
-          /* If the local cell is active, send its ti_end values. */
-          if (ci_active_gravity)
-            scheduler_activate_send(s, ci->send_ti, cj->nodeID);
-        }
-#endif
-      }
-    }
-
-    /* End force ? */
-    else if (t->type == task_type_end_force) {
-
-      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
-        scheduler_activate(s, t);
-    }
-
-    /* Kick ? */
-    else if (t->type == task_type_kick1 || t->type == task_type_kick2) {
-
-      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
-        scheduler_activate(s, t);
-    }
-
-    /* Hydro ghost tasks ? */
-    else if (t->type == task_type_ghost || t->type == task_type_extra_ghost ||
-             t->type == task_type_ghost_in || t->type == task_type_ghost_out) {
-      if (cell_is_active_hydro(t->ci, e)) scheduler_activate(s, t);
-    }
-
-    /* Gravity stuff ? */
-    else if (t->type == task_type_grav_down || t->type == task_type_grav_mesh ||
-             t->type == task_type_grav_long_range ||
-             t->type == task_type_init_grav ||
-             t->type == task_type_init_grav_out ||
-             t->type == task_type_grav_down_in) {
-      if (cell_is_active_gravity(t->ci, e)) scheduler_activate(s, t);
-    }
-
-    else if (t->type == task_type_grav_mm) {
-
-      /* Local pointers. */
-      const struct cell *ci = t->ci;
-      const struct cell *cj = t->cj;
-      const int ci_nodeID = ci->nodeID;
-      const int cj_nodeID = cj->nodeID;
-      const int ci_active_gravity = cell_is_active_gravity(ci, e);
-      const int cj_active_gravity = cell_is_active_gravity(cj, e);
+  message("Total = %d (per cell = %.2f)", nr_tasks, tasks_per_cell);
 
-      if ((ci_active_gravity && ci_nodeID == engine_rank) ||
-          (cj_active_gravity && cj_nodeID == engine_rank))
-        scheduler_activate(s, t);
-    }
-
-    /* Time-step? */
-    else if (t->type == task_type_timestep) {
-      t->ci->updated = 0;
-      t->ci->g_updated = 0;
-      t->ci->s_updated = 0;
-      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
-        scheduler_activate(s, t);
-    }
-
-    /* Subgrid tasks */
-    else if (t->type == task_type_cooling || t->type == task_type_sourceterms) {
-      if (cell_is_active_hydro(t->ci, e)) scheduler_activate(s, t);
-    }
+  /* And the system maximum on rank 0, only after first step, increase by our
+   * margin to allow for some variation in repartitioning. */
+  if (e->nodeID == 0 && e->total_nr_tasks > 0) {
+    message("Total = %d (maximum per cell = %.2f)", nr_tasks,
+            e->tasks_per_cell_max * engine_tasks_per_cell_margin);
   }
-}
-
-/**
- * @brief Mark tasks to be un-skipped and set the sort flags accordingly.
- *
- * @return 1 if the space has to be rebuilt, 0 otherwise.
- */
-int engine_marktasks(struct engine *e) {
-
-  struct scheduler *s = &e->sched;
-  const ticks tic = getticks();
-  int rebuild_space = 0;
-
-  /* Run through the tasks and mark as skip or not. */
-  size_t extra_data[3] = {(size_t)e, (size_t)rebuild_space, (size_t)&e->sched};
-  threadpool_map(&e->threadpool, engine_marktasks_mapper, s->tasks, s->nr_tasks,
-                 sizeof(struct task), 0, extra_data);
-  rebuild_space = extra_data[1];
-
-  if (e->verbose)
-    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
-            clocks_getunit());
-
-  /* All is well... */
-  return rebuild_space;
-}
-
-/**
- * @brief Prints the number of tasks in the engine
- *
- * @param e The #engine.
- */
-void engine_print_task_counts(struct engine *e) {
 
-  const ticks tic = getticks();
-  struct scheduler *const sched = &e->sched;
-  const int nr_tasks = sched->nr_tasks;
-  const struct task *const tasks = sched->tasks;
+#else
+  message("Total = %d (per cell = %.2f)", nr_tasks, tasks_per_cell);
+#endif
+  fflush(stdout);
 
   /* Count and print the number of each task type. */
   int counts[task_type_count + 1];
@@ -3797,8 +1959,7 @@ void engine_print_task_counts(struct engine *e) {
     else
       counts[(int)tasks[k].type] += 1;
   }
-  message("Total = %d  (per cell = %d)", nr_tasks,
-          (int)ceil((double)nr_tasks / e->s->tot_cells));
+
 #ifdef WITH_MPI
   printf("[%04i] %s engine_print_task_counts: task counts are [ %s=%i",
          e->nodeID, clocks_get_timesincestart(), taskID_names[0], counts[0]);
@@ -3823,7 +1984,7 @@ void engine_print_task_counts(struct engine *e) {
  * @brief if necessary, estimate the number of tasks required given
  *        the current tasks in use and the numbers of cells.
  *
- * If e->tasks_per_cell is set greater than 0 then that value is used
+ * If e->tasks_per_cell is set greater than 0.0 then that value is used
  * as the estimate of the average number of tasks per cell,
  * otherwise we attempt an estimate.
  *
@@ -3831,20 +1992,28 @@ void engine_print_task_counts(struct engine *e) {
  *
  * @return the estimated total number of tasks
  */
-int engine_estimate_nr_tasks(struct engine *e) {
+int engine_estimate_nr_tasks(const struct engine *e) {
 
-  int tasks_per_cell = e->tasks_per_cell;
-  if (tasks_per_cell > 0) return e->s->tot_cells * tasks_per_cell;
+  float tasks_per_cell = e->tasks_per_cell;
+  if (tasks_per_cell > 0.0f) {
+    if (e->verbose)
+      message("tasks per cell given as: %.2f, so maximum tasks: %d",
+              e->tasks_per_cell, (int)(e->s->tot_cells * tasks_per_cell));
+    return (int)(e->s->tot_cells * tasks_per_cell);
+  }
 
   /* Our guess differs depending on the types of tasks we are using, but we
    * basically use a formula <n1>*ntopcells + <n2>*(totcells - ntopcells).
    * Where <n1> is the expected maximum tasks per top-level/super cell, and
    * <n2> the expected maximum tasks for all other cells. These should give
-   * a safe upper limit.
-   */
+   * a safe upper limit. */
   int n1 = 0;
   int n2 = 0;
   if (e->policy & engine_policy_hydro) {
+    /* 2 self (density, force), 1 sort, 26/2 density pairs
+       26/2 force pairs, 1 drift, 3 ghosts, 2 kicks, 1 time-step,
+       1 end_force, 2 extra space
+     */
     n1 += 37;
     n2 += 2;
 #ifdef WITH_MPI
@@ -3858,6 +2027,10 @@ int engine_estimate_nr_tasks(struct engine *e) {
 #endif
 #endif
   }
+  if (e->policy & engine_policy_limiter) {
+    n1 += 18;
+    n2 += 1;
+  }
   if (e->policy & engine_policy_self_gravity) {
     n1 += 125;
     n2 += 8;
@@ -3872,14 +2045,27 @@ int engine_estimate_nr_tasks(struct engine *e) {
     n1 += 2;
   }
   if (e->policy & engine_policy_cooling) {
+    /* Cooling task + extra space */
     n1 += 2;
   }
-  if (e->policy & engine_policy_sourceterms) {
-    n1 += 2;
+  if (e->policy & engine_policy_star_formation) {
+    n1 += 1;
   }
   if (e->policy & engine_policy_stars) {
-    n1 += 2;
+    /* 2 self (density, feedback), 1 sort, 26/2 density pairs
+       26/2 feedback pairs, 1 drift, 3 ghosts, 2 kicks, 1 time-step,
+       1 end_force, 2 extra space
+     */
+    n1 += 37;
+    n2 += 2;
+#ifdef WITH_MPI
+    n1 += 6;
+#endif
   }
+#if defined(WITH_LOGGER)
+  /* each cell logs its particles */
+  n1 += 1;
+#endif
 
 #ifdef WITH_MPI
 
@@ -3893,7 +2079,7 @@ int engine_estimate_nr_tasks(struct engine *e) {
     struct cell *c = &e->s->cells_top[k];
 
     /* Any cells with particles will have tasks (local & foreign). */
-    int nparts = c->count + c->gcount + c->scount;
+    int nparts = c->hydro.count + c->grav.count + c->stars.count;
     if (nparts > 0) {
       ntop++;
       ncells++;
@@ -3919,25 +2105,27 @@ int engine_estimate_nr_tasks(struct engine *e) {
   int ncells = e->s->tot_cells;
 #endif
 
-  double ntasks = n1 * ntop + n2 * (ncells - ntop);
+  float ntasks = n1 * ntop + n2 * (ncells - ntop);
   if (ncells > 0) tasks_per_cell = ceil(ntasks / ncells);
 
-  if (tasks_per_cell < 1.0) tasks_per_cell = 1.0;
+  if (tasks_per_cell < 1.0f) tasks_per_cell = 1.0f;
   if (e->verbose)
-    message("tasks per cell estimated as: %d, maximum tasks: %d",
-            tasks_per_cell, ncells * tasks_per_cell);
+    message("tasks per cell estimated as: %.2f, maximum tasks: %d",
+            tasks_per_cell, (int)(ncells * tasks_per_cell));
 
-  return ncells * tasks_per_cell;
+  return (int)(ncells * tasks_per_cell);
 }
 
 /**
  * @brief Rebuild the space and tasks.
  *
  * @param e The #engine.
+ * @param repartitioned Did we just redistribute?
  * @param clean_smoothing_length_values Are we cleaning up the values of
  * the smoothing lengths before building the tasks ?
  */
-void engine_rebuild(struct engine *e, int clean_smoothing_length_values) {
+void engine_rebuild(struct engine *e, int repartitioned,
+                    int clean_smoothing_length_values) {
 
   const ticks tic = getticks();
 
@@ -3946,11 +2134,52 @@ void engine_rebuild(struct engine *e, int clean_smoothing_length_values) {
   e->restarting = 0;
 
   /* Re-build the space. */
-  space_rebuild(e->s, e->verbose);
+  space_rebuild(e->s, repartitioned, e->verbose);
+
+  /* Report the number of cells and memory */
+  if (e->verbose)
+    message(
+        "Nr. of top-level cells: %d Nr. of local cells: %d memory use: %zd MB.",
+        e->s->nr_cells, e->s->tot_cells,
+        (e->s->nr_cells + e->s->tot_cells) * sizeof(struct cell) /
+            (1024 * 1024));
+
+  /* Report the number of multipoles and memory */
+  if (e->verbose && (e->policy & engine_policy_self_gravity))
+    message(
+        "Nr. of top-level mpoles: %d Nr. of local mpoles: %d memory use: %zd "
+        "MB.",
+        e->s->nr_cells, e->s->tot_cells,
+        (e->s->nr_cells + e->s->tot_cells) * sizeof(struct gravity_tensors) /
+            (1024 * 1024));
+
+  const ticks tic2 = getticks();
+
+  /* Update the global counters of particles */
+  long long num_particles[3] = {
+      (long long)(e->s->nr_parts - e->s->nr_extra_parts),
+      (long long)(e->s->nr_gparts - e->s->nr_extra_gparts),
+      (long long)(e->s->nr_sparts - e->s->nr_extra_sparts)};
+#ifdef WITH_MPI
+  MPI_Allreduce(MPI_IN_PLACE, num_particles, 3, MPI_LONG_LONG, MPI_SUM,
+                MPI_COMM_WORLD);
+#endif
+  e->total_nr_parts = num_particles[0];
+  e->total_nr_gparts = num_particles[1];
+  e->total_nr_sparts = num_particles[2];
+
+  /* Flag that there are no inhibited particles */
+  e->nr_inhibited_parts = 0;
+  e->nr_inhibited_gparts = 0;
+  e->nr_inhibited_sparts = 0;
+
+  if (e->verbose)
+    message("updating particle counts took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
 
   /* Re-compute the mesh forces */
   if ((e->policy & engine_policy_self_gravity) && e->s->periodic)
-    pm_mesh_compute_potential(e->mesh, e->s, e->verbose);
+    pm_mesh_compute_potential(e->mesh, e->s, &e->threadpool, e->verbose);
 
   /* Re-compute the maximal RMS displacement constraint */
   if (e->policy & engine_policy_cosmology)
@@ -3967,21 +2196,31 @@ void engine_rebuild(struct engine *e, int clean_smoothing_length_values) {
 /* If in parallel, exchange the cell structure, top-level and neighbouring
  * multipoles. */
 #ifdef WITH_MPI
+  if (e->policy & engine_policy_self_gravity) engine_exchange_top_multipoles(e);
+
   engine_exchange_cells(e);
+#endif
 
-  if (e->policy & engine_policy_self_gravity) engine_exchange_top_multipoles(e);
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Let's check that what we received makes sense */
+  if (e->policy & engine_policy_self_gravity) {
+    long long counter = 0;
+
+    for (int i = 0; i < e->s->nr_cells; ++i) {
+      const struct gravity_tensors *m = &e->s->multipoles_top[i];
+      counter += m->m_pole.num_gpart;
+    }
+    if (counter != e->total_nr_gparts)
+      error("Total particles in multipoles inconsistent with engine");
+  }
 #endif
 
   /* Re-build the tasks. */
   engine_maketasks(e);
 
-#ifdef WITH_MPI
-  if (e->policy & engine_policy_self_gravity)
-    engine_exchange_proxy_multipoles(e);
-#endif
-
   /* Make the list of top-level cells that have tasks */
-  space_list_cells_with_tasks(e->s);
+  space_list_useful_top_level_cells(e->s);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that all cells have been drifted to the current time.
@@ -3989,6 +2228,11 @@ void engine_rebuild(struct engine *e, int clean_smoothing_length_values) {
    * previously been active on this rank. */
   space_check_drift_point(e->s, e->ti_current,
                           e->policy & engine_policy_self_gravity);
+
+  if (e->policy & engine_policy_self_gravity) {
+    for (int k = 0; k < e->s->nr_local_cells; k++)
+      cell_check_foreign_multipole(&e->s->cells_top[e->s->local_cells_top[k]]);
+  }
 #endif
 
   /* Run through the tasks and mark as skip or not. */
@@ -4022,34 +2266,42 @@ void engine_prepare(struct engine *e) {
   const ticks tic = getticks();
 
   int drifted_all = 0;
+  int repartitioned = 0;
 
   /* Unskip active tasks and check for rebuild */
   if (!e->forcerebuild && !e->forcerepart && !e->restarting) engine_unskip(e);
 
+  const ticks tic3 = getticks();
+
 #ifdef WITH_MPI
   MPI_Allreduce(MPI_IN_PLACE, &e->forcerebuild, 1, MPI_INT, MPI_MAX,
                 MPI_COMM_WORLD);
 #endif
 
+  if (e->verbose)
+    message("Communicating rebuild flag took %.3f %s.",
+            clocks_from_ticks(getticks() - tic3), clocks_getunit());
+
   /* Do we need repartitioning ? */
   if (e->forcerepart) {
 
     /* Let's start by drifting everybody to the current time */
-    engine_drift_all(e);
+    engine_drift_all(e, /*drift_mpole=*/0);
     drifted_all = 1;
 
     /* And repartition */
     engine_repartition(e);
+    repartitioned = 1;
   }
 
   /* Do we need rebuilding ? */
   if (e->forcerebuild) {
 
     /* Let's start by drifting everybody to the current time */
-    if (!e->restarting && !drifted_all) engine_drift_all(e);
+    if (!e->restarting && !drifted_all) engine_drift_all(e, /*drift_mpole=*/0);
 
     /* And rebuild */
-    engine_rebuild(e, 0);
+    engine_rebuild(e, repartitioned, 0);
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -4091,121 +2343,178 @@ void engine_barrier(struct engine *e) {
 }
 
 /**
- * @brief Mapping function to collect the data from the kick.
+ * @brief Recursive function gathering end-of-step data.
  *
- * @param c A super-cell.
+ * We recurse until we encounter a timestep or time-step MPI recv task
+ * as the values will have been set at that level. We then bring these
+ * values upwards.
+ *
+ * @param c The #cell to recurse into.
+ * @param e The #engine.
  */
-void engine_collect_end_of_step_recurse(struct cell *c) {
+void engine_collect_end_of_step_recurse(struct cell *c,
+                                        const struct engine *e) {
 
 /* Skip super-cells (Their values are already set) */
 #ifdef WITH_MPI
-  if (c->timestep != NULL || c->recv_ti != NULL) return;
+  if (c->timestep != NULL || c->mpi.recv_ti != NULL) return;
 #else
   if (c->timestep != NULL) return;
 #endif /* WITH_MPI */
 
   /* Counters for the different quantities. */
   size_t updated = 0, g_updated = 0, s_updated = 0;
+  size_t inhibited = 0, g_inhibited = 0, s_inhibited = 0;
   integertime_t ti_hydro_end_min = max_nr_timesteps, ti_hydro_end_max = 0,
                 ti_hydro_beg_max = 0;
   integertime_t ti_gravity_end_min = max_nr_timesteps, ti_gravity_end_max = 0,
                 ti_gravity_beg_max = 0;
+  integertime_t ti_stars_end_min = max_nr_timesteps;
 
   /* Collect the values from the progeny. */
   for (int k = 0; k < 8; k++) {
     struct cell *cp = c->progeny[k];
-    if (cp != NULL && (cp->count > 0 || cp->gcount > 0 || cp->scount > 0)) {
+    if (cp != NULL &&
+        (cp->hydro.count > 0 || cp->grav.count > 0 || cp->stars.count > 0)) {
 
       /* Recurse */
-      engine_collect_end_of_step_recurse(cp);
+      engine_collect_end_of_step_recurse(cp, e);
 
       /* And update */
-      ti_hydro_end_min = min(ti_hydro_end_min, cp->ti_hydro_end_min);
-      ti_hydro_end_max = max(ti_hydro_end_max, cp->ti_hydro_end_max);
-      ti_hydro_beg_max = max(ti_hydro_beg_max, cp->ti_hydro_beg_max);
-      ti_gravity_end_min = min(ti_gravity_end_min, cp->ti_gravity_end_min);
-      ti_gravity_end_max = max(ti_gravity_end_max, cp->ti_gravity_end_max);
-      ti_gravity_beg_max = max(ti_gravity_beg_max, cp->ti_gravity_beg_max);
-      updated += cp->updated;
-      g_updated += cp->g_updated;
-      s_updated += cp->s_updated;
+      ti_hydro_end_min = min(ti_hydro_end_min, cp->hydro.ti_end_min);
+      ti_hydro_end_max = max(ti_hydro_end_max, cp->hydro.ti_end_max);
+      ti_hydro_beg_max = max(ti_hydro_beg_max, cp->hydro.ti_beg_max);
+
+      ti_gravity_end_min = min(ti_gravity_end_min, cp->grav.ti_end_min);
+      ti_gravity_end_max = max(ti_gravity_end_max, cp->grav.ti_end_max);
+      ti_gravity_beg_max = max(ti_gravity_beg_max, cp->grav.ti_beg_max);
+
+      ti_stars_end_min = min(ti_stars_end_min, cp->stars.ti_end_min);
+
+      updated += cp->hydro.updated;
+      g_updated += cp->grav.updated;
+      s_updated += cp->stars.updated;
+
+      inhibited += cp->hydro.inhibited;
+      g_inhibited += cp->grav.inhibited;
+      s_inhibited += cp->stars.inhibited;
 
       /* Collected, so clear for next time. */
-      cp->updated = 0;
-      cp->g_updated = 0;
-      cp->s_updated = 0;
+      cp->hydro.updated = 0;
+      cp->grav.updated = 0;
+      cp->stars.updated = 0;
     }
   }
 
   /* Store the collected values in the cell. */
-  c->ti_hydro_end_min = ti_hydro_end_min;
-  c->ti_hydro_end_max = ti_hydro_end_max;
-  c->ti_hydro_beg_max = ti_hydro_beg_max;
-  c->ti_gravity_end_min = ti_gravity_end_min;
-  c->ti_gravity_end_max = ti_gravity_end_max;
-  c->ti_gravity_beg_max = ti_gravity_beg_max;
-  c->updated = updated;
-  c->g_updated = g_updated;
-  c->s_updated = s_updated;
+  c->hydro.ti_end_min = ti_hydro_end_min;
+  c->hydro.ti_end_max = ti_hydro_end_max;
+  c->hydro.ti_beg_max = ti_hydro_beg_max;
+  c->grav.ti_end_min = ti_gravity_end_min;
+  c->grav.ti_end_max = ti_gravity_end_max;
+  c->grav.ti_beg_max = ti_gravity_beg_max;
+  c->stars.ti_end_min = ti_stars_end_min;
+  c->hydro.updated = updated;
+  c->grav.updated = g_updated;
+  c->stars.updated = s_updated;
+  c->hydro.inhibited = inhibited;
+  c->grav.inhibited = g_inhibited;
+  c->stars.inhibited = s_inhibited;
 }
 
+/**
+ * @brief Mapping function to collect the data from the end of the step
+ *
+ * This function will call a recursive function on all the top-level cells
+ * to collect the information we are after.
+ *
+ * @param map_data The list of cells with tasks on this node.
+ * @param num_elements The number of elements in the list this thread will work
+ * on.
+ * @param extra_data The #engine.
+ */
 void engine_collect_end_of_step_mapper(void *map_data, int num_elements,
                                        void *extra_data) {
 
   struct end_of_step_data *data = (struct end_of_step_data *)extra_data;
-  struct engine *e = data->e;
+  const struct engine *e = data->e;
   struct space *s = e->s;
   int *local_cells = (int *)map_data;
 
   /* Local collectible */
-  size_t updates = 0, g_updates = 0, s_updates = 0;
+  size_t updated = 0, g_updated = 0, s_updated = 0;
+  size_t inhibited = 0, g_inhibited = 0, s_inhibited = 0;
   integertime_t ti_hydro_end_min = max_nr_timesteps, ti_hydro_end_max = 0,
                 ti_hydro_beg_max = 0;
   integertime_t ti_gravity_end_min = max_nr_timesteps, ti_gravity_end_max = 0,
                 ti_gravity_beg_max = 0;
+  integertime_t ti_stars_end_min = max_nr_timesteps;
 
   for (int ind = 0; ind < num_elements; ind++) {
     struct cell *c = &s->cells_top[local_cells[ind]];
 
-    if (c->count > 0 || c->gcount > 0 || c->scount > 0) {
+    if (c->hydro.count > 0 || c->grav.count > 0 || c->stars.count > 0) {
 
       /* Make the top-cells recurse */
-      engine_collect_end_of_step_recurse(c);
+      engine_collect_end_of_step_recurse(c, e);
 
       /* And aggregate */
-      ti_hydro_end_min = min(ti_hydro_end_min, c->ti_hydro_end_min);
-      ti_hydro_end_max = max(ti_hydro_end_max, c->ti_hydro_end_max);
-      ti_hydro_beg_max = max(ti_hydro_beg_max, c->ti_hydro_beg_max);
-      ti_gravity_end_min = min(ti_gravity_end_min, c->ti_gravity_end_min);
-      ti_gravity_end_max = max(ti_gravity_end_max, c->ti_gravity_end_max);
-      ti_gravity_beg_max = max(ti_gravity_beg_max, c->ti_gravity_beg_max);
-      updates += c->updated;
-      g_updates += c->g_updated;
-      s_updates += c->s_updated;
+      if (c->hydro.ti_end_min > e->ti_current)
+        ti_hydro_end_min = min(ti_hydro_end_min, c->hydro.ti_end_min);
+      ti_hydro_end_max = max(ti_hydro_end_max, c->hydro.ti_end_max);
+      ti_hydro_beg_max = max(ti_hydro_beg_max, c->hydro.ti_beg_max);
+
+      if (c->grav.ti_end_min > e->ti_current)
+        ti_gravity_end_min = min(ti_gravity_end_min, c->grav.ti_end_min);
+      ti_gravity_end_max = max(ti_gravity_end_max, c->grav.ti_end_max);
+      ti_gravity_beg_max = max(ti_gravity_beg_max, c->grav.ti_beg_max);
+
+      if (c->stars.ti_end_min > e->ti_current)
+        ti_stars_end_min = min(ti_stars_end_min, c->stars.ti_end_min);
+
+      updated += c->hydro.updated;
+      g_updated += c->grav.updated;
+      s_updated += c->stars.updated;
+
+      inhibited += c->hydro.inhibited;
+      g_inhibited += c->grav.inhibited;
+      s_inhibited += c->stars.inhibited;
 
       /* Collected, so clear for next time. */
-      c->updated = 0;
-      c->g_updated = 0;
-      c->s_updated = 0;
+      c->hydro.updated = 0;
+      c->grav.updated = 0;
+      c->stars.updated = 0;
     }
   }
 
   /* Let's write back to the global data.
    * We use the space lock to garanty single access*/
   if (lock_lock(&s->lock) == 0) {
-    data->updates += updates;
-    data->g_updates += g_updates;
-    data->s_updates += s_updates;
-    data->ti_hydro_end_min = min(ti_hydro_end_min, data->ti_hydro_end_min);
+    data->updated += updated;
+    data->g_updated += g_updated;
+    data->s_updated += s_updated;
+
+    data->inhibited += inhibited;
+    data->g_inhibited += g_inhibited;
+    data->s_inhibited += s_inhibited;
+
+    if (ti_hydro_end_min > e->ti_current)
+      data->ti_hydro_end_min = min(ti_hydro_end_min, data->ti_hydro_end_min);
     data->ti_hydro_end_max = max(ti_hydro_end_max, data->ti_hydro_end_max);
     data->ti_hydro_beg_max = max(ti_hydro_beg_max, data->ti_hydro_beg_max);
-    data->ti_gravity_end_min =
-        min(ti_gravity_end_min, data->ti_gravity_end_min);
+
+    if (ti_gravity_end_min > e->ti_current)
+      data->ti_gravity_end_min =
+          min(ti_gravity_end_min, data->ti_gravity_end_min);
     data->ti_gravity_end_max =
         max(ti_gravity_end_max, data->ti_gravity_end_max);
     data->ti_gravity_beg_max =
         max(ti_gravity_beg_max, data->ti_gravity_beg_max);
+
+    if (ti_stars_end_min > e->ti_current)
+      data->ti_stars_end_min = min(ti_stars_end_min, data->ti_stars_end_min);
   }
+
   if (lock_unlock(&s->lock) != 0) error("Failed to unlock the space");
 }
 
@@ -4229,9 +2538,10 @@ void engine_collect_end_of_step_mapper(void *map_data, int num_elements,
 void engine_collect_end_of_step(struct engine *e, int apply) {
 
   const ticks tic = getticks();
-  const struct space *s = e->s;
+  struct space *s = e->s;
   struct end_of_step_data data;
-  data.updates = 0, data.g_updates = 0, data.s_updates = 0;
+  data.updated = 0, data.g_updated = 0, data.s_updated = 0;
+  data.inhibited = 0, data.g_inhibited = 0, data.s_inhibited = 0;
   data.ti_hydro_end_min = max_nr_timesteps, data.ti_hydro_end_max = 0,
   data.ti_hydro_beg_max = 0;
   data.ti_gravity_end_min = max_nr_timesteps, data.ti_gravity_end_max = 0,
@@ -4240,14 +2550,22 @@ void engine_collect_end_of_step(struct engine *e, int apply) {
 
   /* Collect information from the local top-level cells */
   threadpool_map(&e->threadpool, engine_collect_end_of_step_mapper,
-                 s->local_cells_top, s->nr_local_cells, sizeof(int), 0, &data);
+                 s->local_cells_with_tasks_top, s->nr_local_cells_with_tasks,
+                 sizeof(int), 0, &data);
+
+  /* Store the local number of inhibited particles */
+  s->nr_inhibited_parts = data.inhibited;
+  s->nr_inhibited_gparts = data.g_inhibited;
+  s->nr_inhibited_sparts = data.s_inhibited;
 
   /* Store these in the temporary collection group. */
-  collectgroup1_init(&e->collect_group1, data.updates, data.g_updates,
-                     data.s_updates, data.ti_hydro_end_min,
-                     data.ti_hydro_end_max, data.ti_hydro_beg_max,
-                     data.ti_gravity_end_min, data.ti_gravity_end_max,
-                     data.ti_gravity_beg_max, e->forcerebuild);
+  collectgroup1_init(
+      &e->collect_group1, data.updated, data.g_updated, data.s_updated,
+      data.inhibited, data.g_inhibited, data.s_inhibited, data.ti_hydro_end_min,
+      data.ti_hydro_end_max, data.ti_hydro_beg_max, data.ti_gravity_end_min,
+      data.ti_gravity_end_max, data.ti_gravity_beg_max, e->forcerebuild,
+      e->s->tot_cells, e->sched.nr_tasks,
+      (float)e->sched.nr_tasks / (float)e->s->tot_cells);
 
 /* Aggregate collective data from the different nodes for this step. */
 #ifdef WITH_MPI
@@ -4272,21 +2590,37 @@ void engine_collect_end_of_step(struct engine *e, int apply) {
             in_i[1], e->collect_group1.ti_gravity_end_min);
 
     long long in_ll[3], out_ll[3];
-    out_ll[0] = data.updates;
-    out_ll[1] = data.g_updates;
-    out_ll[2] = data.s_updates;
+    out_ll[0] = data.updated;
+    out_ll[1] = data.g_updated;
+    out_ll[2] = data.s_updated;
+    if (MPI_Allreduce(out_ll, in_ll, 3, MPI_LONG_LONG_INT, MPI_SUM,
+                      MPI_COMM_WORLD) != MPI_SUCCESS)
+      error("Failed to aggregate particle counts.");
+    if (in_ll[0] != (long long)e->collect_group1.updated)
+      error("Failed to get same updated, is %lld, should be %lld", in_ll[0],
+            e->collect_group1.updated);
+    if (in_ll[1] != (long long)e->collect_group1.g_updated)
+      error("Failed to get same g_updated, is %lld, should be %lld", in_ll[1],
+            e->collect_group1.g_updated);
+    if (in_ll[2] != (long long)e->collect_group1.s_updated)
+      error("Failed to get same s_updated, is %lld, should be %lld", in_ll[2],
+            e->collect_group1.s_updated);
+
+    out_ll[0] = data.inhibited;
+    out_ll[1] = data.g_inhibited;
+    out_ll[2] = data.s_inhibited;
     if (MPI_Allreduce(out_ll, in_ll, 3, MPI_LONG_LONG_INT, MPI_SUM,
                       MPI_COMM_WORLD) != MPI_SUCCESS)
       error("Failed to aggregate particle counts.");
-    if (in_ll[0] != (long long)e->collect_group1.updates)
-      error("Failed to get same updates, is %lld, should be %lld", in_ll[0],
-            e->collect_group1.updates);
-    if (in_ll[1] != (long long)e->collect_group1.g_updates)
-      error("Failed to get same g_updates, is %lld, should be %lld", in_ll[1],
-            e->collect_group1.g_updates);
-    if (in_ll[2] != (long long)e->collect_group1.s_updates)
-      error("Failed to get same s_updates, is %lld, should be %lld", in_ll[2],
-            e->collect_group1.s_updates);
+    if (in_ll[0] != (long long)e->collect_group1.inhibited)
+      error("Failed to get same inhibited, is %lld, should be %lld", in_ll[0],
+            e->collect_group1.inhibited);
+    if (in_ll[1] != (long long)e->collect_group1.g_inhibited)
+      error("Failed to get same g_inhibited, is %lld, should be %lld", in_ll[1],
+            e->collect_group1.g_inhibited);
+    if (in_ll[2] != (long long)e->collect_group1.s_inhibited)
+      error("Failed to get same s_inhibited, is %lld, should be %lld", in_ll[2],
+            e->collect_group1.s_inhibited);
 
     int buff = 0;
     if (MPI_Allreduce(&e->forcerebuild, &buff, 1, MPI_INT, MPI_MAX,
@@ -4322,8 +2656,7 @@ void engine_print_stats(struct engine *e) {
   /* Check that all cells have been drifted to the current time.
    * That can include cells that have not
    * previously been active on this rank. */
-  space_check_drift_point(e->s, e->ti_current,
-                          e->policy & engine_policy_self_gravity);
+  space_check_drift_point(e->s, e->ti_current, /*check_mpoles=*/0);
 
   /* Be verbose about this */
   if (e->nodeID == 0) {
@@ -4394,17 +2727,27 @@ void engine_skip_force_and_kick(struct engine *e) {
 
     /* Skip everything that updates the particles */
     if (t->type == task_type_drift_part || t->type == task_type_drift_gpart ||
-        t->type == task_type_kick1 || t->type == task_type_kick2 ||
-        t->type == task_type_timestep || t->subtype == task_subtype_force ||
-        t->subtype == task_subtype_grav || t->type == task_type_end_force ||
+        t->type == task_type_drift_spart || t->type == task_type_kick1 ||
+        t->type == task_type_kick2 || t->type == task_type_timestep ||
+        t->type == task_type_timestep_limiter ||
+        t->subtype == task_subtype_force ||
+        t->subtype == task_subtype_limiter || t->subtype == task_subtype_grav ||
+        t->type == task_type_end_hydro_force ||
+        t->type == task_type_end_grav_force ||
         t->type == task_type_grav_long_range || t->type == task_type_grav_mm ||
-        t->type == task_type_grav_down || t->type == task_type_cooling ||
-        t->type == task_type_sourceterms)
+        t->type == task_type_grav_down || t->type == task_type_grav_down_in ||
+        t->type == task_type_drift_gpart_out || t->type == task_type_cooling ||
+        t->type == task_type_stars_in || t->type == task_type_stars_out ||
+        t->type == task_type_star_formation ||
+        t->type == task_type_extra_ghost ||
+        t->subtype == task_subtype_gradient ||
+        t->subtype == task_subtype_stars_feedback)
       t->skip = 1;
   }
 
   /* Run through the cells and clear some flags. */
   space_map_cells_pre(e->s, 1, cell_clear_drift_flags, NULL);
+  space_map_cells_pre(e->s, 1, cell_clear_limiter_flags, NULL);
 }
 
 /**
@@ -4422,7 +2765,8 @@ void engine_skip_drift(struct engine *e) {
     struct task *t = &tasks[i];
 
     /* Skip everything that moves the particles */
-    if (t->type == task_type_drift_part || t->type == task_type_drift_gpart)
+    if (t->type == task_type_drift_part || t->type == task_type_drift_gpart ||
+        t->type == task_type_drift_spart)
       t->skip = 1;
   }
 
@@ -4436,7 +2780,6 @@ void engine_skip_drift(struct engine *e) {
  * @param e The #engine.
  */
 void engine_launch(struct engine *e) {
-
   const ticks tic = getticks();
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -4506,7 +2849,12 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
 
   /* Update the softening lengths */
   if (e->policy & engine_policy_self_gravity)
-    gravity_update(e->gravity_properties, e->cosmology);
+    gravity_props_update(e->gravity_properties, e->cosmology);
+
+  /* Update the hydro properties */
+  if (e->policy & engine_policy_hydro)
+    hydro_props_update(e->hydro_properties, e->gravity_properties,
+                       e->cosmology);
 
   /* Start by setting the particles in a good state */
   if (e->nodeID == 0) message("Setting particles to a valid state...");
@@ -4515,7 +2863,7 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   if (e->nodeID == 0) message("Computing initial gas densities.");
 
   /* Construct all cells and tasks to start everything */
-  engine_rebuild(e, clean_h_values);
+  engine_rebuild(e, 0, clean_h_values);
 
   /* No time integration. We just want the density and ghosts */
   engine_skip_force_and_kick(e);
@@ -4526,6 +2874,21 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   /* Init the particle data (by hand). */
   space_init_parts(s, e->verbose);
   space_init_gparts(s, e->verbose);
+  space_init_sparts(s, e->verbose);
+
+  /* Update the cooling function */
+  if ((e->policy & engine_policy_cooling) ||
+      (e->policy & engine_policy_temperature))
+    cooling_update(e->cosmology, e->cooling_func);
+
+#ifdef WITH_LOGGER
+  /* Mark the first time step in the particle logger file. */
+  logger_log_timestamp(e->logger, e->ti_current, e->time,
+                       &e->logger->timestamp_offset);
+  /* Make sure that we have enough space in the particle logger file
+   * to store the particles in current time step. */
+  logger_ensure_size(e->logger, e->total_nr_parts, e->total_nr_gparts, 0);
+#endif
 
   /* Now, launch the calculation */
   TIMER_TIC;
@@ -4552,7 +2915,7 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   long long num_gpart_mpole = 0;
   if (e->policy & engine_policy_self_gravity) {
     for (int i = 0; i < e->s->nr_cells; ++i)
-      num_gpart_mpole += e->s->cells_top[i].multipole->m_pole.num_gpart;
+      num_gpart_mpole += e->s->cells_top[i].grav.multipole->m_pole.num_gpart;
     if (num_gpart_mpole != e->total_nr_gparts)
       error(
           "Top-level multipoles don't contain the total number of gpart "
@@ -4565,8 +2928,8 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   /* Now time to get ready for the first time-step */
   if (e->nodeID == 0) message("Running initial fake time-step.");
 
-  /* Prepare all the tasks again for a new round */
-  engine_marktasks(e);
+  /* Construct all cells again for a new round (need to update h_max) */
+  engine_rebuild(e, 0, 0);
 
   /* No drift this time */
   engine_skip_drift(e);
@@ -4574,6 +2937,7 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   /* Init the particle data (by hand). */
   space_init_parts(e->s, e->verbose);
   space_init_gparts(e->s, e->verbose);
+  space_init_sparts(e->s, e->verbose);
 
   /* Print the number of active tasks ? */
   if (e->verbose) engine_print_task_counts(e);
@@ -4584,7 +2948,8 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
     gravity_exact_force_compute(e->s, e);
 #endif
 
-  if (e->nodeID == 0) scheduler_write_dependencies(&e->sched, e->verbose);
+  scheduler_write_dependencies(&e->sched, e->verbose);
+  if (e->nodeID == 0) scheduler_write_task_level(&e->sched);
 
   /* Run the 0th time-step */
   TIMER_TIC2;
@@ -4597,6 +2962,11 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
     gravity_exact_force_check(e->s, e, 1e-1);
 #endif
 
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Make sure all woken-up particles have been processed */
+  space_check_limiter(e->s);
+#endif
+
   /* Recover the (integer) end of the next time-step */
   engine_collect_end_of_step(e, 1);
 
@@ -4609,6 +2979,10 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
     double *prev_x = s->parts[0].x;
     long long *prev_id = &s->parts[0].id;
     for (size_t k = 1; k < s->nr_parts; k++) {
+
+      /* Ignore fake buffer particles for on-the-fly creation */
+      if (s->parts[k].time_bin == time_bin_not_created) continue;
+
       if (prev_x[0] == s->parts[k].x[0] && prev_x[1] == s->parts[k].x[1] &&
           prev_x[2] == s->parts[k].x[2]) {
         if (e->verbose)
@@ -4631,6 +3005,10 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
     int failed = 0;
     double *prev_x = s->gparts[0].x;
     for (size_t k = 1; k < s->nr_gparts; k++) {
+
+      /* Ignore fake buffer particles for on-the-fly creation */
+      if (s->gparts[k].time_bin == time_bin_not_created) continue;
+
       if (prev_x[0] == s->gparts[k].x[0] && prev_x[1] == s->gparts[k].x[1] &&
           prev_x[2] == s->gparts[k].x[2]) {
         if (e->verbose)
@@ -4655,12 +3033,27 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
   if (s->cells_top != NULL && s->nr_parts > 0) {
     for (int i = 0; i < s->nr_cells; i++) {
       struct cell *c = &s->cells_top[i];
-      if (c->nodeID == engine_rank && c->count > 0) {
-        float part_h_max = c->parts[0].h;
-        for (int k = 1; k < c->count; k++) {
-          if (c->parts[k].h > part_h_max) part_h_max = c->parts[k].h;
+      if (c->nodeID == engine_rank && c->hydro.count > 0) {
+        float part_h_max = c->hydro.parts[0].h;
+        for (int k = 1; k < c->hydro.count; k++) {
+          if (c->hydro.parts[k].h > part_h_max)
+            part_h_max = c->hydro.parts[k].h;
+        }
+        c->hydro.h_max = max(part_h_max, c->hydro.h_max);
+      }
+    }
+  }
+
+  if (s->cells_top != NULL && s->nr_sparts > 0) {
+    for (int i = 0; i < s->nr_cells; i++) {
+      struct cell *c = &s->cells_top[i];
+      if (c->nodeID == engine_rank && c->stars.count > 0) {
+        float spart_h_max = c->stars.parts[0].h;
+        for (int k = 1; k < c->stars.count; k++) {
+          if (c->stars.parts[k].h > spart_h_max)
+            spart_h_max = c->stars.parts[k].h;
         }
-        c->h_max = max(part_h_max, c->h_max);
+        c->stars.h_max = max(spart_h_max, c->stars.h_max);
       }
     }
   }
@@ -4693,32 +3086,36 @@ void engine_step(struct engine *e) {
   struct clocks_time time1, time2;
   clocks_gettime(&time1);
 
-#ifdef SWIFT_DEBUG_TASKS
   e->tic_step = getticks();
-#endif
 
   if (e->nodeID == 0) {
 
     /* Print some information to the screen */
     printf(
-        "  %6d %14e %14e %10.5f %14e %4d %4d %12lld %12lld %12lld %21.3f %6d\n",
+        "  %6d %14e %12.7f %12.7f %14e %4d %4d %12lld %12lld %12lld %21.3f "
+        "%6d\n",
         e->step, e->time, e->cosmology->a, e->cosmology->z, e->time_step,
         e->min_active_bin, e->max_active_bin, e->updates, e->g_updates,
         e->s_updates, e->wallclock_time, e->step_props);
+#ifdef SWIFT_DEBUG_CHECKS
     fflush(stdout);
+#endif
 
     if (!e->restarting)
-      fprintf(e->file_timesteps,
-              "  %6d %14e %14e %10.5f %14e %4d %4d %12lld %12lld %12lld %21.3f "
-              "%6d\n",
-              e->step, e->time, e->cosmology->a, e->cosmology->z, e->time_step,
-              e->min_active_bin, e->max_active_bin, e->updates, e->g_updates,
-              e->s_updates, e->wallclock_time, e->step_props);
+      fprintf(
+          e->file_timesteps,
+          "  %6d %14e %12.7f %12.7f %14e %4d %4d %12lld %12lld %12lld %21.3f "
+          "%6d\n",
+          e->step, e->time, e->cosmology->a, e->cosmology->z, e->time_step,
+          e->min_active_bin, e->max_active_bin, e->updates, e->g_updates,
+          e->s_updates, e->wallclock_time, e->step_props);
+#ifdef SWIFT_DEBUG_CHECKS
     fflush(e->file_timesteps);
+#endif
   }
 
   /* We need some cells to exist but not the whole task stuff. */
-  if (e->restarting) space_rebuild(e->s, e->verbose);
+  if (e->restarting) space_rebuild(e->s, 0, e->verbose);
 
   /* Move forward in time */
   e->ti_old = e->ti_current;
@@ -4730,7 +3127,7 @@ void engine_step(struct engine *e) {
   e->step_props = engine_step_prop_none;
 
   /* When restarting, move everyone to the current time. */
-  if (e->restarting) engine_drift_all(e);
+  if (e->restarting) engine_drift_all(e, /*drift_mpole=*/1);
 
   /* Get the physical value of the time and time-step size */
   if (e->policy & engine_policy_cosmology) {
@@ -4744,13 +3141,23 @@ void engine_step(struct engine *e) {
     e->time_step = (e->ti_current - e->ti_old) * e->time_base;
   }
 
+  /* Update the cooling function */
+  if ((e->policy & engine_policy_cooling) ||
+      (e->policy & engine_policy_temperature))
+    cooling_update(e->cosmology, e->cooling_func);
+
   /*****************************************************/
   /* OK, we now know what the next end of time-step is */
   /*****************************************************/
 
   /* Update the softening lengths */
   if (e->policy & engine_policy_self_gravity)
-    gravity_update(e->gravity_properties, e->cosmology);
+    gravity_props_update(e->gravity_properties, e->cosmology);
+
+  /* Update the hydro properties */
+  if (e->policy & engine_policy_hydro)
+    hydro_props_update(e->hydro_properties, e->gravity_properties,
+                       e->cosmology);
 
   /* Trigger a tree-rebuild if we passed the frequency threshold */
   if ((e->policy & engine_policy_self_gravity) &&
@@ -4758,9 +3165,18 @@ void engine_step(struct engine *e) {
        ((double)e->total_nr_gparts) * e->gravity_properties->rebuild_frequency))
     e->forcerebuild = 1;
 
+#ifdef WITH_LOGGER
+  /* Mark the current time step in the particle logger file. */
+  logger_log_timestamp(e->logger, e->ti_current, e->time,
+                       &e->logger->timestamp_offset);
+  /* Make sure that we have enough space in the particle logger file
+   * to store the particles in current time step. */
+  logger_ensure_size(e->logger, e->total_nr_parts, e->total_nr_gparts, 0);
+#endif
+
   /* Are we drifting everything (a la Gadget/GIZMO) ? */
   if (e->policy & engine_policy_drift_all && !e->forcerebuild)
-    engine_drift_all(e);
+    engine_drift_all(e, /*drift_mpole=*/1);
 
   /* Are we reconstructing the multipoles or drifting them ?*/
   if ((e->policy & engine_policy_self_gravity) && !e->forcerebuild) {
@@ -4790,7 +3206,7 @@ void engine_step(struct engine *e) {
   long long num_gpart_mpole = 0;
   if (e->policy & engine_policy_self_gravity) {
     for (int i = 0; i < e->s->nr_cells; ++i)
-      num_gpart_mpole += e->s->cells_top[i].multipole->m_pole.num_gpart;
+      num_gpart_mpole += e->s->cells_top[i].grav.multipole->m_pole.num_gpart;
     if (num_gpart_mpole != e->total_nr_gparts)
       error(
           "Multipoles don't contain the total number of gpart mpoles=%lld "
@@ -4816,15 +3232,22 @@ void engine_step(struct engine *e) {
     gravity_exact_force_check(e->s, e, 1e-1);
 #endif
 
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Make sure all woken-up particles have been processed */
+  space_check_limiter(e->s);
+#endif
+
   /* Collect information about the next time-step */
-  engine_collect_end_of_step(e, 0);
+  engine_collect_end_of_step(e, 1);
   e->forcerebuild = e->collect_group1.forcerebuild;
+  e->updates_since_rebuild += e->collect_group1.updated;
+  e->g_updates_since_rebuild += e->collect_group1.g_updated;
+  e->s_updates_since_rebuild += e->collect_group1.s_updated;
 
-  /* Now apply all the collected time step updates and particle counts. */
-  collectgroup1_apply(&e->collect_group1, e);
-  e->updates_since_rebuild += e->collect_group1.updates;
-  e->g_updates_since_rebuild += e->collect_group1.g_updates;
-  e->s_updates_since_rebuild += e->collect_group1.s_updates;
+#ifdef SWIFT_DEBUG_CHECKS
+  if (e->ti_end_min == e->ti_current && e->ti_end_min < max_nr_timesteps)
+    error("Obtained a time-step of size 0");
+#endif
 
   /********************************************************/
   /* OK, we are done with the regular stuff. Time for i/o */
@@ -4841,10 +3264,8 @@ void engine_step(struct engine *e) {
   clocks_gettime(&time2);
   e->wallclock_time = (float)clocks_diff(&time1, &time2);
 
-#ifdef SWIFT_DEBUG_TASKS
   /* Time in ticks at the end of this step. */
   e->toc_step = getticks();
-#endif
 }
 
 /**
@@ -4858,184 +3279,174 @@ void engine_step(struct engine *e) {
  */
 void engine_check_for_dumps(struct engine *e) {
 
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const int with_stf = (e->policy & engine_policy_structure_finding);
+
+  /* What kind of output are we getting? */
+  enum output_type {
+    output_none,
+    output_snapshot,
+    output_statistics,
+    output_stf
+  };
+
+  /* What kind of output do we want? And at which time ?
+   * Find the earliest output (amongst all kinds) that takes place
+   * before the next time-step */
+  enum output_type type = output_none;
+  integertime_t ti_output = max_nr_timesteps;
+
   /* Save some statistics ? */
-  int save_stats = 0;
-  if (e->ti_end_min > e->ti_next_stats && e->ti_next_stats > 0) save_stats = 1;
+  if (e->ti_end_min > e->ti_next_stats && e->ti_next_stats > 0) {
+    if (e->ti_next_stats < ti_output) {
+      ti_output = e->ti_next_stats;
+      type = output_statistics;
+    }
+  }
 
   /* Do we want a snapshot? */
-  int dump_snapshot = 0;
-  if (e->ti_end_min > e->ti_next_snapshot && e->ti_next_snapshot > 0)
-    dump_snapshot = 1;
+  if (e->ti_end_min > e->ti_next_snapshot && e->ti_next_snapshot > 0) {
+    if (e->ti_next_snapshot < ti_output) {
+      ti_output = e->ti_next_snapshot;
+      type = output_snapshot;
+    }
+  }
 
   /* Do we want to perform structure finding? */
-  int run_stf = 0;
-  if ((e->policy & engine_policy_structure_finding)) {
-    if (e->stf_output_freq_format == STEPS && e->step % e->deltaStepSTF == 0)
-      run_stf = 1;
-    else if (e->stf_output_freq_format == TIME &&
-             e->ti_end_min > e->ti_nextSTF && e->ti_nextSTF > 0)
-      run_stf = 1;
+  if (with_stf) {
+    if (e->ti_end_min > e->ti_next_stf && e->ti_next_stf > 0) {
+      if (e->ti_next_stf < ti_output) {
+        ti_output = e->ti_next_stf;
+        type = output_stf;
+      }
+    }
   }
 
   /* Store information before attempting extra dump-related drifts */
-  integertime_t ti_current = e->ti_current;
-  timebin_t max_active_bin = e->max_active_bin;
-  double time = e->time;
+  const integertime_t ti_current = e->ti_current;
+  const timebin_t max_active_bin = e->max_active_bin;
+  const double time = e->time;
+
+  while (type != output_none) {
+
+    /* Let's fake that we are at the dump time */
+    e->ti_current = ti_output;
+    e->max_active_bin = 0;
+    if (with_cosmology) {
+      cosmology_update(e->cosmology, e->physical_constants, e->ti_current);
+      e->time = e->cosmology->time;
+    } else {
+      e->time = ti_output * e->time_base + e->time_begin;
+    }
 
-  while (save_stats || dump_snapshot || run_stf) {
+    /* Drift everyone */
+    engine_drift_all(e, /*drift_mpole=*/0);
 
     /* Write some form of output */
-    if (dump_snapshot && save_stats) {
-
-      /* If both, need to figure out which one occurs first */
-      if (e->ti_next_stats == e->ti_next_snapshot) {
-
-        /* Let's fake that we are at the common dump time */
-        e->ti_current = e->ti_next_snapshot;
-        e->max_active_bin = 0;
-        if (!(e->policy & engine_policy_cosmology))
-          e->time = e->ti_next_snapshot * e->time_base + e->time_begin;
-
-        /* Drift everyone */
-        engine_drift_all(e);
-
-        /* Dump everything */
-        engine_print_stats(e);
-        engine_dump_snapshot(e);
-
-      } else if (e->ti_next_stats < e->ti_next_snapshot) {
+    switch (type) {
+      case output_snapshot:
 
-        /* Let's fake that we are at the stats dump time */
-        e->ti_current = e->ti_next_stats;
-        e->max_active_bin = 0;
-        if (!(e->policy & engine_policy_cosmology))
-          e->time = e->ti_next_stats * e->time_base + e->time_begin;
+        /* Do we want a corresponding VELOCIraptor output? */
+        if (with_stf && e->snapshot_invoke_stf) {
 
-        /* Drift everyone */
-        engine_drift_all(e);
-
-        /* Dump stats */
-        engine_print_stats(e);
-
-        /* Let's fake that we are at the snapshot dump time */
-        e->ti_current = e->ti_next_snapshot;
-        e->max_active_bin = 0;
-        if (!(e->policy & engine_policy_cosmology))
-          e->time = e->ti_next_snapshot * e->time_base + e->time_begin;
-
-        /* Drift everyone */
-        engine_drift_all(e);
+#ifdef HAVE_VELOCIRAPTOR
+          velociraptor_invoke(e, /*linked_with_snap=*/1);
+          e->step_props |= engine_step_prop_stf;
+#else
+          error(
+              "Asking for a VELOCIraptor output but SWIFT was compiled without "
+              "the interface!");
+#endif
+        }
 
-        /* Dump snapshot */
+          /* Dump... */
+#ifdef WITH_LOGGER
+        /* Write a file containing the offsets in the particle logger. */
+        engine_dump_index(e);
+#else
         engine_dump_snapshot(e);
+#endif
 
-      } else if (e->ti_next_stats > e->ti_next_snapshot) {
-
-        /* Let's fake that we are at the snapshot dump time */
-        e->ti_current = e->ti_next_snapshot;
-        e->max_active_bin = 0;
-        if (!(e->policy & engine_policy_cosmology))
-          e->time = e->ti_next_snapshot * e->time_base + e->time_begin;
-
-        /* Drift everyone */
-        engine_drift_all(e);
-
-        /* Dump snapshot */
-        engine_dump_snapshot(e);
+        /* Free the memory allocated for VELOCIraptor i/o. */
+        if (with_stf && e->snapshot_invoke_stf) {
+#ifdef HAVE_VELOCIRAPTOR
+          free(e->s->gpart_group_data);
+          e->s->gpart_group_data = NULL;
+#endif
+        }
 
-        /* Let's fake that we are at the stats dump time */
-        e->ti_current = e->ti_next_stats;
-        e->max_active_bin = 0;
-        if (!(e->policy & engine_policy_cosmology))
-          e->time = e->ti_next_stats * e->time_base + e->time_begin;
+        /* ... and find the next output time */
+        engine_compute_next_snapshot_time(e);
+        break;
 
-        /* Drift everyone */
-        engine_drift_all(e);
+      case output_statistics:
 
-        /* Dump stats */
+        /* Dump */
         engine_print_stats(e);
-      }
-
-      /* Let's compute the time of the next outputs */
-      engine_compute_next_snapshot_time(e);
-      engine_compute_next_statistics_time(e);
-
-    } else if (dump_snapshot) {
-
-      /* Let's fake that we are at the snapshot dump time */
-      e->ti_current = e->ti_next_snapshot;
-      e->max_active_bin = 0;
-      if (!(e->policy & engine_policy_cosmology))
-        e->time = e->ti_next_snapshot * e->time_base + e->time_begin;
-
-      /* Drift everyone */
-      engine_drift_all(e);
-
-      /* Dump... */
-      engine_dump_snapshot(e);
 
-      /* ... and find the next output time */
-      engine_compute_next_snapshot_time(e);
+        /* and move on */
+        engine_compute_next_statistics_time(e);
 
-    } else if (save_stats) {
-
-      /* Let's fake that we are at the stats dump time */
-      e->ti_current = e->ti_next_stats;
-      e->max_active_bin = 0;
-      if (!(e->policy & engine_policy_cosmology))
-        e->time = e->ti_next_stats * e->time_base + e->time_begin;
-
-      /* Drift everyone */
-      engine_drift_all(e);
-
-      /* Dump */
-      engine_print_stats(e);
-
-      /* and move on */
-      engine_compute_next_statistics_time(e);
-    }
-
-    /* Perform structure finding? */
-    if (run_stf) {
+        break;
 
-    // MATTHIEU: Add a drift_all here. And check the order with the order i/o
-    // options.
+      case output_stf:
 
 #ifdef HAVE_VELOCIRAPTOR
-      velociraptor_init(e);
-      velociraptor_invoke(e);
+        /* Unleash the raptor! */
+        velociraptor_invoke(e, /*linked_with_snap=*/0);
+        e->step_props |= engine_step_prop_stf;
 
-      /* ... and find the next output time */
-      if (e->stf_output_freq_format == TIME) engine_compute_next_stf_time(e);
+        /* ... and find the next output time */
+        engine_compute_next_stf_time(e);
+#else
+        error(
+            "Asking for a VELOCIraptor output but SWIFT was compiled without "
+            "the interface!");
 #endif
+        break;
+
+      default:
+        error("Invalid dump type");
     }
 
     /* We need to see whether whether we are in the pathological case
      * where there can be another dump before the next step. */
 
+    type = output_none;
+    ti_output = max_nr_timesteps;
+
     /* Save some statistics ? */
-    save_stats = 0;
-    if (e->ti_end_min > e->ti_next_stats && e->ti_next_stats > 0)
-      save_stats = 1;
+    if (e->ti_end_min > e->ti_next_stats && e->ti_next_stats > 0) {
+      if (e->ti_next_stats < ti_output) {
+        ti_output = e->ti_next_stats;
+        type = output_statistics;
+      }
+    }
 
     /* Do we want a snapshot? */
-    dump_snapshot = 0;
-    if (e->ti_end_min > e->ti_next_snapshot && e->ti_next_snapshot > 0)
-      dump_snapshot = 1;
+    if (e->ti_end_min > e->ti_next_snapshot && e->ti_next_snapshot > 0) {
+      if (e->ti_next_snapshot < ti_output) {
+        ti_output = e->ti_next_snapshot;
+        type = output_snapshot;
+      }
+    }
 
     /* Do we want to perform structure finding? */
-    run_stf = 0;
-    if ((e->policy & engine_policy_structure_finding)) {
-      if (e->stf_output_freq_format == STEPS && e->step % e->deltaStepSTF == 0)
-        run_stf = 1;
-      else if (e->stf_output_freq_format == TIME &&
-               e->ti_end_min > e->ti_nextSTF && e->ti_nextSTF > 0)
-        run_stf = 1;
+    if (with_stf) {
+      if (e->ti_end_min > e->ti_next_stf && e->ti_next_stf > 0) {
+        if (e->ti_next_stf < ti_output) {
+          ti_output = e->ti_next_stf;
+          type = output_stf;
+        }
+      }
     }
-  }
+
+  } /* While loop over output types */
 
   /* Restore the information we stored */
   e->ti_current = ti_current;
+  if (e->policy & engine_policy_cosmology)
+    cosmology_update(e->cosmology, e->physical_constants, e->ti_current);
   e->max_active_bin = max_active_bin;
   e->time = time;
 }
@@ -5052,202 +3463,88 @@ void engine_dump_restarts(struct engine *e, int drifted_all, int force) {
   if (e->restart_dump) {
     ticks tic = getticks();
 
-    /* Dump when the time has arrived, or we are told to. */
-    int dump = ((tic > e->restart_next) || force);
-
-#ifdef WITH_MPI
-    /* Synchronize this action from rank 0 (ticks may differ between
-     * machines). */
-    MPI_Bcast(&dump, 1, MPI_INT, 0, MPI_COMM_WORLD);
-#endif
-    if (dump) {
-
-      if (e->nodeID == 0) message("Writing restart files");
-
-      /* Clean out the previous saved files, if found. Do this now as we are
-       * MPI synchronized. */
-      restart_remove_previous(e->restart_file);
-
-      /* Drift all particles first (may have just been done). */
-      if (!drifted_all) engine_drift_all(e);
-      restart_write(e, e->restart_file);
-
-      if (e->verbose)
-        message("Dumping restart files took %.3f %s",
-                clocks_from_ticks(getticks() - tic), clocks_getunit());
-
-      /* Time after which next dump will occur. */
-      e->restart_next += e->restart_dt;
-
-      /* Flag that we dumped the restarts */
-      e->step_props |= engine_step_prop_restarts;
-    }
-  }
-}
-
-/**
- * @brief Returns 1 if the simulation has reached its end point, 0 otherwise
- */
-int engine_is_done(struct engine *e) {
-  return !(e->ti_current < max_nr_timesteps);
-}
-
-/**
- * @brief Unskip all the tasks that act on active cells at this time.
- *
- * @param e The #engine.
- */
-void engine_unskip(struct engine *e) {
-
-  const ticks tic = getticks();
-  struct space *s = e->s;
-
-#ifdef WITH_PROFILER
-  static int count = 0;
-  char filename[100];
-  sprintf(filename, "/tmp/swift_runner_do_usnkip_mapper_%06i.prof", count++);
-  ProfilerStart(filename);
-#endif  // WITH_PROFILER
-
-  /* Move the active local cells to the top of the list. */
-  int *local_cells = e->s->local_cells_top;
-  int num_active_cells = 0;
-  for (int k = 0; k < s->nr_local_cells; k++) {
-    struct cell *c = &s->cells_top[local_cells[k]];
-    if ((e->policy & engine_policy_hydro && cell_is_active_hydro(c, e)) ||
-        (e->policy &
-             (engine_policy_self_gravity | engine_policy_external_gravity) &&
-         cell_is_active_gravity(c, e))) {
-      if (num_active_cells != k)
-        memswap(&local_cells[k], &local_cells[num_active_cells], sizeof(int));
-      num_active_cells += 1;
-    }
-  }
-
-  /* Activate all the regular tasks */
-  threadpool_map(&e->threadpool, runner_do_unskip_mapper, local_cells,
-                 num_active_cells, sizeof(int), 1, e);
-
-#ifdef WITH_PROFILER
-  ProfilerStop();
-#endif  // WITH_PROFILER
-
-  if (e->verbose)
-    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
-            clocks_getunit());
-}
-
-/**
- * @brief Mapper function to drift *all* particle types and multipoles forward
- * in time.
- *
- * @param map_data An array of #cell%s.
- * @param num_elements Chunk size.
- * @param extra_data Pointer to an #engine.
- */
-void engine_do_drift_all_mapper(void *map_data, int num_elements,
-                                void *extra_data) {
-
-  struct engine *e = (struct engine *)extra_data;
-  struct cell *cells = (struct cell *)map_data;
-
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct cell *c = &cells[ind];
-    if (c != NULL && c->nodeID == e->nodeID) {
-      /* Drift all the particles */
-      cell_drift_part(c, e, 1);
-
-      /* Drift all the g-particles */
-      cell_drift_gpart(c, e, 1);
-    }
-
-    /* Drift the multipoles */
-    if (e->policy & engine_policy_self_gravity) {
-      cell_drift_all_multipoles(c, e);
-    }
-  }
-}
-
-/**
- * @brief Drift *all* particles and multipoles at all levels
- * forward to the current time.
- *
- * @param e The #engine.
- */
-void engine_drift_all(struct engine *e) {
-
-  const ticks tic = getticks();
-
-#ifdef SWIFT_DEBUG_CHECKS
-  if (e->nodeID == 0) {
-    if (e->policy & engine_policy_cosmology)
-      message("Drifting all to a=%e",
-              exp(e->ti_current * e->time_base) * e->cosmology->a_begin);
-    else
-      message("Drifting all to t=%e",
-              e->ti_current * e->time_base + e->time_begin);
-  }
-#endif
-
-  threadpool_map(&e->threadpool, engine_do_drift_all_mapper, e->s->cells_top,
-                 e->s->nr_cells, sizeof(struct cell), 0, e);
-
-  /* Synchronize particle positions */
-  space_synchronize_particle_positions(e->s);
+    /* Dump when the time has arrived, or we are told to. */
+    int dump = ((tic > e->restart_next) || force);
 
-#ifdef SWIFT_DEBUG_CHECKS
-  /* Check that all cells have been drifted to the current time. */
-  space_check_drift_point(e->s, e->ti_current,
-                          e->policy & engine_policy_self_gravity);
-  part_verify_links(e->s->parts, e->s->gparts, e->s->sparts, e->s->nr_parts,
-                    e->s->nr_gparts, e->s->nr_sparts, e->verbose);
+#ifdef WITH_MPI
+    /* Synchronize this action from rank 0 (ticks may differ between
+     * machines). */
+    MPI_Bcast(&dump, 1, MPI_INT, 0, MPI_COMM_WORLD);
 #endif
+    if (dump) {
 
-  if (e->verbose)
-    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
-            clocks_getunit());
-}
+      if (e->nodeID == 0) message("Writing restart files");
 
-/**
- * @brief Mapper function to drift *all* top-level multipoles forward in
- * time.
- *
- * @param map_data An array of #cell%s.
- * @param num_elements Chunk size.
- * @param extra_data Pointer to an #engine.
- */
-void engine_do_drift_top_multipoles_mapper(void *map_data, int num_elements,
-                                           void *extra_data) {
+      /* Clean out the previous saved files, if found. Do this now as we are
+       * MPI synchronized. */
+      restart_remove_previous(e->restart_file);
 
-  struct engine *e = (struct engine *)extra_data;
-  struct cell *cells = (struct cell *)map_data;
+      /* Drift all particles first (may have just been done). */
+      if (!drifted_all) engine_drift_all(e, /*drift_mpole=*/1);
+      restart_write(e, e->restart_file);
 
-  for (int ind = 0; ind < num_elements; ind++) {
-    struct cell *c = &cells[ind];
-    if (c != NULL) {
+      if (e->verbose)
+        message("Dumping restart files took %.3f %s",
+                clocks_from_ticks(getticks() - tic), clocks_getunit());
 
-      /* Drift the multipole at this level only */
-      if (c->ti_old_multipole != e->ti_current) cell_drift_multipole(c, e);
+      /* Time after which next dump will occur. */
+      e->restart_next += e->restart_dt;
+
+      /* Flag that we dumped the restarts */
+      e->step_props |= engine_step_prop_restarts;
     }
   }
 }
 
 /**
- * @brief Drift *all* top-level multipoles forward to the current time.
+ * @brief Returns 1 if the simulation has reached its end point, 0 otherwise
+ */
+int engine_is_done(struct engine *e) {
+  return !(e->ti_current < max_nr_timesteps);
+}
+
+/**
+ * @brief Unskip all the tasks that act on active cells at this time.
  *
  * @param e The #engine.
  */
-void engine_drift_top_multipoles(struct engine *e) {
+void engine_unskip(struct engine *e) {
 
   const ticks tic = getticks();
+  struct space *s = e->s;
 
-  threadpool_map(&e->threadpool, engine_do_drift_top_multipoles_mapper,
-                 e->s->cells_top, e->s->nr_cells, sizeof(struct cell), 0, e);
+#ifdef WITH_PROFILER
+  static int count = 0;
+  char filename[100];
+  sprintf(filename, "/tmp/swift_runner_do_usnkip_mapper_%06i.prof", count++);
+  ProfilerStart(filename);
+#endif  // WITH_PROFILER
 
-#ifdef SWIFT_DEBUG_CHECKS
-  /* Check that all cells have been drifted to the current time. */
-  space_check_top_multipoles_drift_point(e->s, e->ti_current);
-#endif
+  /* Move the active local cells to the top of the list. */
+  int *local_cells = e->s->local_cells_with_tasks_top;
+  int num_active_cells = 0;
+  for (int k = 0; k < s->nr_local_cells_with_tasks; k++) {
+    struct cell *c = &s->cells_top[local_cells[k]];
+
+    if ((e->policy & engine_policy_hydro && cell_is_active_hydro(c, e)) ||
+        (e->policy & engine_policy_self_gravity &&
+         cell_is_active_gravity(c, e)) ||
+        (e->policy & engine_policy_external_gravity &&
+         cell_is_active_gravity(c, e))) {
+
+      if (num_active_cells != k)
+        memswap(&local_cells[k], &local_cells[num_active_cells], sizeof(int));
+      num_active_cells += 1;
+    }
+  }
+
+  /* Activate all the regular tasks */
+  threadpool_map(&e->threadpool, runner_do_unskip_mapper, local_cells,
+                 num_active_cells, sizeof(int), 1, e);
+
+#ifdef WITH_PROFILER
+  ProfilerStop();
+#endif  // WITH_PROFILER
 
   if (e->verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
@@ -5306,24 +3603,43 @@ void engine_reconstruct_multipoles(struct engine *e) {
 void engine_makeproxies(struct engine *e) {
 
 #ifdef WITH_MPI
+  /* Let's time this */
+  const ticks tic = getticks();
+
+  /* Useful local information */
   const int nodeID = e->nodeID;
   const struct space *s = e->s;
-  const int *cdim = s->cdim;
+
+  /* Handle on the cells and proxies */
+  struct cell *cells = s->cells_top;
+  struct proxy *proxies = e->proxies;
+
+  /* Some info about the domain */
+  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
+  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
   const int periodic = s->periodic;
+  const double cell_width[3] = {cells[0].width[0], cells[0].width[1],
+                                cells[0].width[2]};
 
   /* Get some info about the physics */
-  const double *dim = s->dim;
-  const struct gravity_props *props = e->gravity_properties;
-  const double theta_crit2 = props->theta_crit2;
   const int with_hydro = (e->policy & engine_policy_hydro);
   const int with_gravity = (e->policy & engine_policy_self_gravity);
+  const double theta_crit_inv = e->gravity_properties->theta_crit_inv;
+  const double theta_crit2 = e->gravity_properties->theta_crit2;
+  const double max_mesh_dist = e->mesh->r_cut_max;
+  const double max_mesh_dist2 = max_mesh_dist * max_mesh_dist;
 
-  /* Handle on the cells and proxies */
-  struct cell *cells = s->cells_top;
-  struct proxy *proxies = e->proxies;
+  /* Distance between centre of the cell and corners */
+  const double r_diag2 = cell_width[0] * cell_width[0] +
+                         cell_width[1] * cell_width[1] +
+                         cell_width[2] * cell_width[2];
+  const double r_diag = 0.5 * sqrt(r_diag2);
 
-  /* Let's time this */
-  const ticks tic = getticks();
+  /* Maximal distance from a shifted CoM to centre of cell */
+  const double delta_CoM = engine_max_proxy_centre_frac * r_diag;
+
+  /* Maximal distance from shifted CoM to any corner */
+  const double r_max = r_diag + 2. * delta_CoM;
 
   /* Prepare the proxies and the proxy index. */
   if (e->proxy_ind == NULL)
@@ -5333,63 +3649,63 @@ void engine_makeproxies(struct engine *e) {
   e->nr_proxies = 0;
 
   /* Compute how many cells away we need to walk */
-  int delta = 1; /*hydro case */
+  int delta_cells = 1; /*hydro case */
+
+  /* Gravity needs to take the opening angle into account */
   if (with_gravity) {
-    const double distance = 2.5 * cells[0].width[0] / props->theta_crit;
-    delta = (int)(distance / cells[0].width[0]) + 1;
+    const double distance = 2. * r_max * theta_crit_inv;
+    delta_cells = (int)(distance / cells[0].dmin) + 1;
+  }
+
+  /* Turn this into upper and lower bounds for loops */
+  int delta_m = delta_cells;
+  int delta_p = delta_cells;
+
+  /* Special case where every cell is in range of every other one */
+  if (delta_cells >= cdim[0] / 2) {
+    if (cdim[0] % 2 == 0) {
+      delta_m = cdim[0] / 2;
+      delta_p = cdim[0] / 2 - 1;
+    } else {
+      delta_m = cdim[0] / 2;
+      delta_p = cdim[0] / 2;
+    }
   }
 
   /* Let's be verbose about this choice */
   if (e->verbose)
-    message("Looking for proxies up to %d top-level cells away", delta);
+    message(
+        "Looking for proxies up to %d top-level cells away (delta_m=%d "
+        "delta_p=%d)",
+        delta_cells, delta_m, delta_p);
 
   /* Loop over each cell in the space. */
-  int ind[3];
-  for (ind[0] = 0; ind[0] < cdim[0]; ind[0]++) {
-    for (ind[1] = 0; ind[1] < cdim[1]; ind[1]++) {
-      for (ind[2] = 0; ind[2] < cdim[2]; ind[2]++) {
+  for (int i = 0; i < cdim[0]; i++) {
+    for (int j = 0; j < cdim[1]; j++) {
+      for (int k = 0; k < cdim[2]; k++) {
 
         /* Get the cell ID. */
-        const int cid = cell_getid(cdim, ind[0], ind[1], ind[2]);
-
-        double CoM_i[3] = {0., 0., 0.};
-        double r_max_i = 0.;
-
-        if (with_gravity) {
-
-          /* Get ci's multipole */
-          const struct gravity_tensors *multi_i = cells[cid].multipole;
-          CoM_i[0] = multi_i->CoM[0];
-          CoM_i[1] = multi_i->CoM[1];
-          CoM_i[2] = multi_i->CoM[2];
-          r_max_i = multi_i->r_max;
-        }
-
-        /* Loop over all its neighbours (periodic). */
-        for (int i = -delta; i <= delta; i++) {
-          int ii = ind[0] + i;
-          if (ii >= cdim[0])
-            ii -= cdim[0];
-          else if (ii < 0)
-            ii += cdim[0];
-          for (int j = -delta; j <= delta; j++) {
-            int jj = ind[1] + j;
-            if (jj >= cdim[1])
-              jj -= cdim[1];
-            else if (jj < 0)
-              jj += cdim[1];
-            for (int k = -delta; k <= delta; k++) {
-              int kk = ind[2] + k;
-              if (kk >= cdim[2])
-                kk -= cdim[2];
-              else if (kk < 0)
-                kk += cdim[2];
+        const int cid = cell_getid(cdim, i, j, k);
+
+        /* Loop over all its neighbours in range. */
+        for (int ii = -delta_m; ii <= delta_p; ii++) {
+          int iii = i + ii;
+          if (!periodic && (iii < 0 || iii >= cdim[0])) continue;
+          iii = (iii + cdim[0]) % cdim[0];
+          for (int jj = -delta_m; jj <= delta_p; jj++) {
+            int jjj = j + jj;
+            if (!periodic && (jjj < 0 || jjj >= cdim[1])) continue;
+            jjj = (jjj + cdim[1]) % cdim[1];
+            for (int kk = -delta_m; kk <= delta_p; kk++) {
+              int kkk = k + kk;
+              if (!periodic && (kkk < 0 || kkk >= cdim[2])) continue;
+              kkk = (kkk + cdim[2]) % cdim[2];
 
               /* Get the cell ID. */
-              const int cjd = cell_getid(cdim, ii, jj, kk);
+              const int cjd = cell_getid(cdim, iii, jjj, kkk);
 
-              /* Early abort (same cell) */
-              if (cid == cjd) continue;
+              /* Early abort  */
+              if (cid >= cjd) continue;
 
               /* Early abort (both same node) */
               if (cells[cid].nodeID == nodeID && cells[cjd].nodeID == nodeID)
@@ -5401,48 +3717,74 @@ void engine_makeproxies(struct engine *e) {
 
               int proxy_type = 0;
 
-              /* In the hydro case, only care about neighbours */
+              /* In the hydro case, only care about direct neighbours */
               if (with_hydro) {
 
+                // MATTHIEU: to do: Write a better expression for the
+                // non-periodic case.
+
                 /* This is super-ugly but checks for direct neighbours */
                 /* with periodic BC */
-                if (((abs(ind[0] - ii) <= 1 ||
-                      abs(ind[0] - ii - cdim[0]) <= 1 ||
-                      abs(ind[0] - ii + cdim[0]) <= 1) &&
-                     (abs(ind[1] - jj) <= 1 ||
-                      abs(ind[1] - jj - cdim[1]) <= 1 ||
-                      abs(ind[1] - jj + cdim[1]) <= 1) &&
-                     (abs(ind[2] - kk) <= 1 ||
-                      abs(ind[2] - kk - cdim[2]) <= 1 ||
-                      abs(ind[2] - kk + cdim[2]) <= 1)))
+                if (((abs(i - iii) <= 1 || abs(i - iii - cdim[0]) <= 1 ||
+                      abs(i - iii + cdim[0]) <= 1) &&
+                     (abs(j - jjj) <= 1 || abs(j - jjj - cdim[1]) <= 1 ||
+                      abs(j - jjj + cdim[1]) <= 1) &&
+                     (abs(k - kkk) <= 1 || abs(k - kkk - cdim[2]) <= 1 ||
+                      abs(k - kkk + cdim[2]) <= 1)))
                   proxy_type |= (int)proxy_cell_type_hydro;
               }
 
               /* In the gravity case, check distances using the MAC. */
               if (with_gravity) {
 
-                /* Get cj's multipole */
-                const struct gravity_tensors *multi_j = cells[cjd].multipole;
-                const double CoM_j[3] = {multi_j->CoM[0], multi_j->CoM[1],
-                                         multi_j->CoM[2]};
-                const double r_max_j = multi_j->r_max;
-
-                /* Let's compute the current distance between the cell pair*/
-                double dx = CoM_i[0] - CoM_j[0];
-                double dy = CoM_i[1] - CoM_j[1];
-                double dz = CoM_i[2] - CoM_j[2];
-
-                /* Apply BC */
-                if (periodic) {
-                  dx = nearest(dx, dim[0]);
-                  dy = nearest(dy, dim[1]);
-                  dz = nearest(dz, dim[2]);
-                }
-                const double r2 = dx * dx + dy * dy + dz * dz;
+                /* First just add the direct neighbours. Then look for
+                   some further out if the opening angle demands it */
+
+                /* This is super-ugly but checks for direct neighbours */
+                /* with periodic BC */
+                if (((abs(i - iii) <= 1 || abs(i - iii - cdim[0]) <= 1 ||
+                      abs(i - iii + cdim[0]) <= 1) &&
+                     (abs(j - jjj) <= 1 || abs(j - jjj - cdim[1]) <= 1 ||
+                      abs(j - jjj + cdim[1]) <= 1) &&
+                     (abs(k - kkk) <= 1 || abs(k - kkk - cdim[2]) <= 1 ||
+                      abs(k - kkk + cdim[2]) <= 1))) {
 
-                /* Are we too close for M2L? */
-                if (!gravity_M2L_accept(r_max_i, r_max_j, theta_crit2, r2))
                   proxy_type |= (int)proxy_cell_type_gravity;
+                } else {
+
+                  /* We don't have multipoles yet (or their CoMs) so we will
+                     have to cook up something based on cell locations only. We
+                     hence need an upper limit on the distance that the CoMs in
+                     those cells could have. We then can decide whether we are
+                     too close for an M2L interaction and hence require a proxy
+                     as this pair of cells cannot rely on just an M2L
+                     calculation. */
+
+                  /* Minimal distance between any two points in the cells */
+                  const double min_dist_centres2 = cell_min_dist2_same_size(
+                      &cells[cid], &cells[cjd], periodic, dim);
+
+                  /* Let's now assume the CoMs will shift a bit */
+                  const double min_dist_CoM =
+                      sqrt(min_dist_centres2) - 2. * delta_CoM;
+                  const double min_dist_CoM2 = min_dist_CoM * min_dist_CoM;
+
+                  /* Are we beyond the distance where the truncated forces are 0
+                   * but not too far such that M2L can be used? */
+                  if (periodic) {
+
+                    if ((min_dist_CoM2 < max_mesh_dist2) &&
+                        (!gravity_M2L_accept(r_max, r_max, theta_crit2,
+                                             min_dist_CoM2)))
+                      proxy_type |= (int)proxy_cell_type_gravity;
+
+                  } else {
+
+                    if (!gravity_M2L_accept(r_max, r_max, theta_crit2,
+                                            min_dist_CoM2))
+                      proxy_type |= (int)proxy_cell_type_gravity;
+                  }
+                }
               }
 
               /* Abort if not in range at all */
@@ -5452,8 +3794,8 @@ void engine_makeproxies(struct engine *e) {
               if (cells[cid].nodeID == nodeID && cells[cjd].nodeID != nodeID) {
 
                 /* Do we already have a relationship with this node? */
-                int pid = e->proxy_ind[cells[cjd].nodeID];
-                if (pid < 0) {
+                int proxy_id = e->proxy_ind[cells[cjd].nodeID];
+                if (proxy_id < 0) {
                   if (e->nr_proxies == engine_maxproxies)
                     error("Maximum number of proxies exceeded.");
 
@@ -5463,24 +3805,31 @@ void engine_makeproxies(struct engine *e) {
 
                   /* Store the information */
                   e->proxy_ind[cells[cjd].nodeID] = e->nr_proxies;
-                  pid = e->nr_proxies;
+                  proxy_id = e->nr_proxies;
                   e->nr_proxies += 1;
+
+                  /* Check the maximal proxy limit */
+                  if ((size_t)proxy_id >= 8 * sizeof(long long))
+                    error(
+                        "Created more than %zu proxies. cell.mpi.sendto will "
+                        "overflow.",
+                        8 * sizeof(long long));
                 }
 
                 /* Add the cell to the proxy */
-                proxy_addcell_in(&proxies[pid], &cells[cjd], proxy_type);
-                proxy_addcell_out(&proxies[pid], &cells[cid], proxy_type);
+                proxy_addcell_in(&proxies[proxy_id], &cells[cjd], proxy_type);
+                proxy_addcell_out(&proxies[proxy_id], &cells[cid], proxy_type);
 
                 /* Store info about where to send the cell */
-                cells[cid].sendto |= (1ULL << pid);
+                cells[cid].mpi.sendto |= (1ULL << proxy_id);
               }
 
               /* Same for the symmetric case? */
               if (cells[cjd].nodeID == nodeID && cells[cid].nodeID != nodeID) {
 
                 /* Do we already have a relationship with this node? */
-                int pid = e->proxy_ind[cells[cid].nodeID];
-                if (pid < 0) {
+                int proxy_id = e->proxy_ind[cells[cid].nodeID];
+                if (proxy_id < 0) {
                   if (e->nr_proxies == engine_maxproxies)
                     error("Maximum number of proxies exceeded.");
 
@@ -5490,16 +3839,23 @@ void engine_makeproxies(struct engine *e) {
 
                   /* Store the information */
                   e->proxy_ind[cells[cid].nodeID] = e->nr_proxies;
-                  pid = e->nr_proxies;
+                  proxy_id = e->nr_proxies;
                   e->nr_proxies += 1;
+
+                  /* Check the maximal proxy limit */
+                  if ((size_t)proxy_id >= 8 * sizeof(long long))
+                    error(
+                        "Created more than %zu proxies. cell.mpi.sendto will "
+                        "overflow.",
+                        8 * sizeof(long long));
                 }
 
                 /* Add the cell to the proxy */
-                proxy_addcell_in(&proxies[pid], &cells[cid], proxy_type);
-                proxy_addcell_out(&proxies[pid], &cells[cjd], proxy_type);
+                proxy_addcell_in(&proxies[proxy_id], &cells[cid], proxy_type);
+                proxy_addcell_out(&proxies[proxy_id], &cells[cjd], proxy_type);
 
                 /* Store info about where to send the cell */
-                cells[cjd].sendto |= (1ULL << pid);
+                cells[cjd].mpi.sendto |= (1ULL << proxy_id);
               }
             }
           }
@@ -5526,6 +3882,8 @@ void engine_makeproxies(struct engine *e) {
 void engine_split(struct engine *e, struct partition *initial_partition) {
 
 #ifdef WITH_MPI
+  const ticks tic = getticks();
+
   struct space *s = e->s;
 
   /* Do the initial partition of the cells. */
@@ -5537,8 +3895,8 @@ void engine_split(struct engine *e, struct partition *initial_partition) {
   /* Re-allocate the local parts. */
   if (e->verbose)
     message("Re-allocating parts array from %zu to %zu.", s->size_parts,
-            (size_t)(s->nr_parts * 1.2));
-  s->size_parts = s->nr_parts * 1.2;
+            (size_t)(s->nr_parts * engine_redistribute_alloc_margin));
+  s->size_parts = s->nr_parts * engine_redistribute_alloc_margin;
   struct part *parts_new = NULL;
   struct xpart *xparts_new = NULL;
   if (posix_memalign((void **)&parts_new, part_align,
@@ -5565,8 +3923,8 @@ void engine_split(struct engine *e, struct partition *initial_partition) {
   /* Re-allocate the local sparts. */
   if (e->verbose)
     message("Re-allocating sparts array from %zu to %zu.", s->size_sparts,
-            (size_t)(s->nr_sparts * 1.2));
-  s->size_sparts = s->nr_sparts * 1.2;
+            (size_t)(s->nr_sparts * engine_redistribute_alloc_margin));
+  s->size_sparts = s->nr_sparts * engine_redistribute_alloc_margin;
   struct spart *sparts_new = NULL;
   if (posix_memalign((void **)&sparts_new, spart_align,
                      sizeof(struct spart) * s->size_sparts) != 0)
@@ -5585,8 +3943,8 @@ void engine_split(struct engine *e, struct partition *initial_partition) {
   /* Re-allocate the local gparts. */
   if (e->verbose)
     message("Re-allocating gparts array from %zu to %zu.", s->size_gparts,
-            (size_t)(s->nr_gparts * 1.2));
-  s->size_gparts = s->nr_gparts * 1.2;
+            (size_t)(s->nr_gparts * engine_redistribute_alloc_margin));
+  s->size_gparts = s->nr_gparts * engine_redistribute_alloc_margin;
   struct gpart *gparts_new = NULL;
   if (posix_memalign((void **)&gparts_new, gpart_align,
                      sizeof(struct gpart) * s->size_gparts) != 0)
@@ -5613,11 +3971,87 @@ void engine_split(struct engine *e, struct partition *initial_partition) {
                     s->nr_sparts, e->verbose);
 #endif
 
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+
 #else
   error("SWIFT was not compiled with MPI support.");
 #endif
 }
 
+#ifdef DEBUG_INTERACTIONS_STARS
+/**
+ * @brief Exchange the feedback counters between stars
+ * @param e The #engine.
+ */
+void engine_collect_stars_counter(struct engine *e) {
+
+#ifdef WITH_MPI
+  if (e->total_nr_sparts > 1e5) {
+    message("WARNING: too many sparts, skipping exchange of counters");
+    return;
+  }
+
+  /* Get number of sparticles for each rank */
+  size_t *n_sparts = (size_t *)malloc(e->nr_nodes * sizeof(size_t));
+
+  int err = MPI_Allgather(&e->s->nr_sparts_foreign, 1, MPI_UNSIGNED_LONG,
+                          n_sparts, 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
+  if (err != MPI_SUCCESS) error("Communication failed");
+
+  /* Compute derived quantities */
+  int total = 0;
+  int *n_sparts_int = (int *)malloc(e->nr_nodes * sizeof(int));
+  int *displs = (int *)malloc(e->nr_nodes * sizeof(int));
+  for (int i = 0; i < e->nr_nodes; i++) {
+    displs[i] = total;
+    total += n_sparts[i];
+    n_sparts_int[i] = n_sparts[i];
+  }
+
+  /* Get all sparticles */
+  struct spart *sparts = (struct spart *)malloc(total * sizeof(struct spart));
+  err = MPI_Allgatherv(e->s->sparts_foreign, e->s->nr_sparts_foreign,
+                       spart_mpi_type, sparts, n_sparts_int, displs,
+                       spart_mpi_type, MPI_COMM_WORLD);
+  if (err != MPI_SUCCESS) error("Communication failed");
+
+  /* Reset counters */
+  for (size_t i = 0; i < e->s->nr_sparts_foreign; i++) {
+    e->s->sparts_foreign[i].num_ngb_force = 0;
+  }
+
+  /* Update counters */
+  struct spart *local_sparts = e->s->sparts;
+  for (size_t i = 0; i < e->s->nr_sparts; i++) {
+    const long long id_i = local_sparts[i].id;
+
+    for (int j = 0; j < total; j++) {
+      const long long id_j = sparts[j].id;
+
+      if (id_j == id_i) {
+        if (j >= displs[engine_rank] &&
+            j < displs[engine_rank] + n_sparts_int[engine_rank]) {
+          error(
+              "Found a local spart in foreign cell ID=%lli: j=%i, displs=%i, "
+              "n_sparts=%i",
+              id_j, j, displs[engine_rank], n_sparts_int[engine_rank]);
+        }
+
+        local_sparts[i].num_ngb_force += sparts[j].num_ngb_force;
+      }
+    }
+  }
+
+  free(n_sparts);
+  free(n_sparts_int); free(displs);
+  free(sparts);
+#endif
+}
+
+#endif
+
 /**
  * @brief Writes a snapshot with the current state of the engine
  *
@@ -5632,8 +4066,7 @@ void engine_dump_snapshot(struct engine *e) {
   /* Check that all cells have been drifted to the current time.
    * That can include cells that have not
    * previously been active on this rank. */
-  space_check_drift_point(e->s, e->ti_current,
-                          e->policy & engine_policy_self_gravity);
+  space_check_drift_point(e->s, e->ti_current, /* check_mpole=*/0);
 
   /* Be verbose about this */
   if (e->nodeID == 0) {
@@ -5655,6 +4088,10 @@ void engine_dump_snapshot(struct engine *e) {
   }
 #endif
 
+#ifdef DEBUG_INTERACTIONS_STARS
+  engine_collect_stars_counter(e);
+#endif
+
 /* Dump... */
 #if defined(HAVE_HDF5)
 #if defined(WITH_MPI)
@@ -5682,6 +4119,42 @@ void engine_dump_snapshot(struct engine *e) {
             (float)clocks_diff(&time1, &time2), clocks_getunit());
 }
 
+/**
+ * @brief Writes an index file with the current state of the engine
+ *
+ * @param e The #engine.
+ */
+void engine_dump_index(struct engine *e) {
+
+#if defined(WITH_LOGGER)
+  struct clocks_time time1, time2;
+  clocks_gettime(&time1);
+
+  if (e->verbose) {
+    if (e->policy & engine_policy_cosmology)
+      message("Writing index at a=%e",
+              exp(e->ti_current * e->time_base) * e->cosmology->a_begin);
+    else
+      message("Writing index at t=%e",
+              e->ti_current * e->time_base + e->time_begin);
+  }
+
+  /* Dump... */
+  write_index_single(e, e->logger->base_name, e->internal_units,
+                     e->snapshot_units);
+
+  /* Flag that we dumped a snapshot */
+  e->step_props |= engine_step_prop_logger_index;
+
+  clocks_gettime(&time2);
+  if (e->verbose)
+    message("writing particle indices took %.3f %s.",
+            (float)clocks_diff(&time1, &time2), clocks_getunit());
+#else
+  error("SWIFT was not compiled with the logger");
+#endif
+}
+
 #ifdef HAVE_SETAFFINITY
 /**
  * @brief Returns the initial affinity the main thread is using.
@@ -5757,24 +4230,28 @@ void engine_unpin(void) {
  * @param physical_constants The #phys_const used for this run.
  * @param cosmo The #cosmology used for this run.
  * @param hydro The #hydro_props used for this run.
+ * @param entropy_floor The #entropy_floor_properties for this run.
  * @param gravity The #gravity_props used for this run.
+ * @param stars The #stars_props used for this run.
  * @param mesh The #pm_mesh used for the long-range periodic forces.
  * @param potential The properties of the external potential.
  * @param cooling_func The properties of the cooling function.
+ * @param starform The #star_formation model of this run.
  * @param chemistry The chemistry information.
- * @param sourceterms The properties of the source terms function.
  */
 void engine_init(struct engine *e, struct space *s, struct swift_params *params,
                  long long Ngas, long long Ngparts, long long Nstars,
                  int policy, int verbose, struct repartition *reparttype,
                  const struct unit_system *internal_units,
                  const struct phys_const *physical_constants,
-                 struct cosmology *cosmo, const struct hydro_props *hydro,
-                 struct gravity_props *gravity, struct pm_mesh *mesh,
+                 struct cosmology *cosmo, struct hydro_props *hydro,
+                 const struct entropy_floor_properties *entropy_floor,
+                 struct gravity_props *gravity, const struct stars_props *stars,
+                 struct pm_mesh *mesh,
                  const struct external_potential *potential,
-                 const struct cooling_function_data *cooling_func,
-                 const struct chemistry_global_data *chemistry,
-                 struct sourceterms *sourceterms) {
+                 struct cooling_function_data *cooling_func,
+                 const struct star_formation *starform,
+                 const struct chemistry_global_data *chemistry) {
 
   /* Clean-up everything */
   bzero(e, sizeof(struct engine));
@@ -5809,15 +4286,14 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params,
   parser_get_param_string(params, "Snapshots:basename", e->snapshot_base_name);
   e->snapshot_compression =
       parser_get_opt_param_int(params, "Snapshots:compression", 0);
-  e->snapshot_label_first =
-      parser_get_opt_param_int(params, "Snapshots:label_first", 0);
-  if (e->snapshot_label_first < 0)
-    error("Snapshots:label_first must be zero or positive");
-  e->snapshot_label_delta =
-      parser_get_opt_param_int(params, "Snapshots:label_delta", 1);
+  e->snapshot_int_time_label_on =
+      parser_get_opt_param_int(params, "Snapshots:int_time_label_on", 0);
+  e->snapshot_invoke_stf =
+      parser_get_opt_param_int(params, "Snapshots:invoke_stf", 0);
   e->snapshot_units = (struct unit_system *)malloc(sizeof(struct unit_system));
   units_init_default(e->snapshot_units, params, "Snapshots", internal_units);
   e->snapshot_output_count = 0;
+  e->stf_output_count = 0;
   e->dt_min = parser_get_param_double(params, "TimeIntegration:dt_min");
   e->dt_max = parser_get_param_double(params, "TimeIntegration:dt_max");
   e->dt_max_RMS_displacement = FLT_MAX;
@@ -5835,18 +4311,26 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params,
   e->physical_constants = physical_constants;
   e->cosmology = cosmo;
   e->hydro_properties = hydro;
+  e->entropy_floor = entropy_floor;
   e->gravity_properties = gravity;
+  e->stars_properties = stars;
   e->mesh = mesh;
   e->external_potential = potential;
   e->cooling_func = cooling_func;
+  e->star_formation = starform;
   e->chemistry = chemistry;
-  e->sourceterms = sourceterms;
   e->parameter_file = params;
-  e->cell_loc = NULL;
 #ifdef WITH_MPI
   e->cputime_last_step = 0;
   e->last_repartition = 0;
 #endif
+  e->total_nr_cells = 0;
+  e->total_nr_tasks = 0;
+
+#if defined(WITH_LOGGER)
+  e->logger = (struct logger *)malloc(sizeof(struct logger));
+  logger_init(e->logger, params);
+#endif
 
   /* Make the space link back to the engine. */
   s->e = e;
@@ -5875,6 +4359,21 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params,
     e->ti_current = 0;
   }
 
+  /* Initialise VELOCIraptor output. */
+  if (e->policy & engine_policy_structure_finding) {
+    parser_get_param_string(params, "StructureFinding:basename",
+                            e->stf_base_name);
+    parser_get_param_string(params, "StructureFinding:config_file_name",
+                            e->stf_config_file_name);
+
+    e->time_first_stf_output =
+        parser_get_opt_param_double(params, "StructureFinding:time_first", 0.);
+    e->a_first_stf_output = parser_get_opt_param_double(
+        params, "StructureFinding:scale_factor_first", 0.1);
+    e->delta_time_stf =
+        parser_get_opt_param_double(params, "StructureFinding:delta_time", -1.);
+  }
+
   engine_init_output_lists(e, params);
 }
 
@@ -5925,34 +4424,8 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
   e->restart_file = restart_file;
   e->restart_next = 0;
   e->restart_dt = 0;
-  e->timeFirstSTFOutput = 0;
   engine_rank = nodeID;
 
-  /* Initialise VELOCIraptor. */
-  if (e->policy & engine_policy_structure_finding) {
-    parser_get_param_string(params, "StructureFinding:basename",
-                            e->stfBaseName);
-    e->timeFirstSTFOutput =
-        parser_get_param_double(params, "StructureFinding:time_first");
-    e->a_first_stf = parser_get_opt_param_double(
-        params, "StructureFinding:scale_factor_first", 0.1);
-    e->stf_output_freq_format =
-        parser_get_param_int(params, "StructureFinding:output_time_format");
-    if (e->stf_output_freq_format == STEPS) {
-      e->deltaStepSTF =
-          parser_get_param_int(params, "StructureFinding:delta_step");
-    } else if (e->stf_output_freq_format == TIME) {
-      e->deltaTimeSTF =
-          parser_get_param_double(params, "StructureFinding:delta_time");
-    } else
-      error(
-          "Invalid flag (%d) set for output time format of structure finding.",
-          e->stf_output_freq_format);
-
-    /* overwrite input if outputlist */
-    if (e->output_list_stf) e->stf_output_freq_format = TIME;
-  }
-
   /* Get the number of queues */
   int nr_queues =
       parser_get_opt_param_int(params, "Scheduler:nr_queues", nr_threads);
@@ -6144,13 +4617,14 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
 
       fprintf(e->file_timesteps,
               "# Step Properties: Rebuild=%d, Redistribute=%d, Repartition=%d, "
-              "Statistics=%d, Snapshot=%d, Restarts=%d\n",
+              "Statistics=%d, Snapshot=%d, Restarts=%d, STF=%d, logger=%d\n",
               engine_step_prop_rebuild, engine_step_prop_redistribute,
               engine_step_prop_repartition, engine_step_prop_statistics,
-              engine_step_prop_snapshot, engine_step_prop_restarts);
+              engine_step_prop_snapshot, engine_step_prop_restarts,
+              engine_step_prop_stf, engine_step_prop_logger_index);
 
       fprintf(e->file_timesteps,
-              "# %6s %14s %14s %10s %14s %9s %12s %12s %12s %16s [%s] %6s\n",
+              "# %6s %14s %12s %12s %14s %9s %12s %12s %12s %16s [%s] %6s\n",
               "Step", "Time", "Scale-factor", "Redshift", "Time-step",
               "Time-bins", "Updates", "g-Updates", "s-Updates",
               "Wall-clock time", clocks_getunit(), "Props");
@@ -6162,13 +4636,18 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
   engine_print_policy(e);
 
   /* Print information about the hydro scheme */
-  if (e->policy & engine_policy_hydro)
+  if (e->policy & engine_policy_hydro) {
     if (e->nodeID == 0) hydro_props_print(e->hydro_properties);
+    if (e->nodeID == 0) entropy_floor_print(e->entropy_floor);
+  }
 
   /* Print information about the gravity scheme */
   if (e->policy & engine_policy_self_gravity)
     if (e->nodeID == 0) gravity_props_print(e->gravity_properties);
 
+  if (e->policy & engine_policy_stars)
+    if (e->nodeID == 0) stars_props_print(e->stars_properties);
+
   /* Check we have sensible time bounds */
   if (e->time_begin >= e->time_end)
     error(
@@ -6176,6 +4655,10 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
         "(t_beg = %e)",
         e->time_end, e->time_begin);
 
+  /* Check we don't have inappropriate time labels */
+  if ((e->snapshot_int_time_label_on == 1) && (e->time_end <= 1.f))
+    error("Snapshot integer time labels enabled but end time <= 1");
+
   /* Check we have sensible time-step values */
   if (e->dt_min > e->dt_max)
     error(
@@ -6231,18 +4714,19 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
           "simulation start a=%e.",
           e->a_first_statistics, e->cosmology->a_begin);
 
-    if ((e->policy & engine_policy_structure_finding) &&
-        (e->stf_output_freq_format == TIME)) {
+    if (e->policy & engine_policy_structure_finding) {
 
-      if (e->deltaTimeSTF <= 1.)
-        error("Time between STF (%e) must be > 1.", e->deltaTimeSTF);
+      if (e->delta_time_stf == -1. && !e->snapshot_invoke_stf)
+        error("A value for `StructureFinding:delta_time` must be specified");
 
-      if (e->a_first_stf < e->cosmology->a_begin)
+      if (e->delta_time_stf <= 1. && e->delta_time_stf != -1.)
+        error("Time between STF (%e) must be > 1.", e->delta_time_stf);
+
+      if (e->a_first_stf_output < e->cosmology->a_begin)
         error(
             "Scale-factor of first stf output (%e) must be after the "
-            "simulation "
-            "start a=%e.",
-            e->a_first_stf, e->cosmology->a_begin);
+            "simulation start a=%e.",
+            e->a_first_stf_output, e->cosmology->a_begin);
     }
   } else {
 
@@ -6267,23 +4751,17 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
           "t=%e.",
           e->time_first_statistics, e->time_begin);
 
-    if ((e->policy & engine_policy_structure_finding) &&
-        (e->stf_output_freq_format == TIME)) {
+    if (e->policy & engine_policy_structure_finding) {
 
-      if (e->deltaTimeSTF <= 0.)
-        error("Time between STF (%e) must be positive.", e->deltaTimeSTF);
+      if (e->delta_time_stf == -1. && !e->snapshot_invoke_stf)
+        error("A value for `StructureFinding:delta_time` must be specified");
 
-      if (e->timeFirstSTFOutput < e->time_begin)
-        error("Time of first STF (%e) must be after the simulation start t=%e.",
-              e->timeFirstSTFOutput, e->time_begin);
-    }
-  }
+      if (e->delta_time_stf <= 0. && e->delta_time_stf != -1.)
+        error("Time between STF (%e) must be positive.", e->delta_time_stf);
 
-  if (e->policy & engine_policy_structure_finding) {
-    /* Find the time of the first stf output */
-    if (e->stf_output_freq_format == TIME) {
-      engine_compute_next_stf_time(e);
-      message("Next STF step will be: %lld", e->ti_nextSTF);
+      if (e->time_first_stf_output < e->time_begin)
+        error("Time of first STF (%e) must be after the simulation start t=%e.",
+              e->time_first_stf_output, e->time_begin);
     }
   }
 
@@ -6298,12 +4776,32 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
                 MPI_COMM_WORLD);
 #endif
 
-  /* Find the time of the first snapshot  output */
+#if defined(WITH_LOGGER)
+  if (e->nodeID == 0)
+    message(
+        "WARNING: There is currently no way of predicting the output "
+        "size, please use it carefully");
+#endif
+
+  /* Find the time of the first snapshot output */
   engine_compute_next_snapshot_time(e);
 
   /* Find the time of the first statistics output */
   engine_compute_next_statistics_time(e);
 
+  /* Find the time of the first stf output */
+  if (e->policy & engine_policy_structure_finding) {
+    engine_compute_next_stf_time(e);
+  }
+
+  /* Check that we are invoking VELOCIraptor only if we have it */
+  if (e->snapshot_invoke_stf &&
+      !(e->policy & engine_policy_structure_finding)) {
+    error(
+        "Invoking VELOCIraptor after snapshots but structure finding wasn't "
+        "activated at runtime (Use --velociraptor).");
+  }
+
   /* Whether restarts are enabled. Yes by default. Can be changed on restart. */
   e->restart_dump = parser_get_opt_param_int(params, "Restarts:enable", 1);
 
@@ -6338,7 +4836,9 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
 /* Construct types for MPI communications */
 #ifdef WITH_MPI
   part_create_mpi_types();
-  stats_create_MPI_type();
+  multipole_create_mpi_types();
+  stats_create_mpi_type();
+  proxy_create_mpi_type();
   task_create_mpi_comms();
 #endif
 
@@ -6356,16 +4856,21 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
   /* Expected average for tasks per cell. If set to zero we use a heuristic
    * guess based on the numbers of cells and how many tasks per cell we expect.
    * On restart this number cannot be estimated (no cells yet), so we recover
-   * from the end of the dumped run. Can be changed on restart.
-   */
+   * from the end of the dumped run. Can be changed on restart. */
   e->tasks_per_cell =
-      parser_get_opt_param_int(params, "Scheduler:tasks_per_cell", 0);
-  int maxtasks = 0;
+      parser_get_opt_param_float(params, "Scheduler:tasks_per_cell", 0.0);
+  e->tasks_per_cell_max = 0.0f;
+
+  float maxtasks = 0;
   if (restart)
     maxtasks = e->restart_max_tasks;
   else
     maxtasks = engine_estimate_nr_tasks(e);
 
+  /* Estimated number of links per tasks */
+  e->links_per_tasks =
+      parser_get_opt_param_int(params, "Scheduler:links_per_tasks", 10);
+
   /* Init the scheduler. */
   scheduler_init(&e->sched, e->s, maxtasks, nr_queues,
                  (e->policy & scheduler_flag_steal), e->nodeID, &e->threadpool);
@@ -6443,7 +4948,17 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
     }
   }
 
-/* Free the affinity stuff */
+#ifdef WITH_LOGGER
+  /* Write the particle logger header */
+  logger_write_file_header(e->logger, e);
+#endif
+
+  /* Initialise the structure finder */
+#ifdef HAVE_VELOCIRAPTOR
+  if (e->policy & engine_policy_structure_finding) velociraptor_init(e);
+#endif
+
+    /* Free the affinity stuff */
 #if defined(HAVE_SETAFFINITY)
   if (with_aff) {
     free(cpuid);
@@ -6487,6 +5002,7 @@ void engine_print_policy(struct engine *e) {
  * @param e The #engine.
  */
 void engine_compute_next_snapshot_time(struct engine *e) {
+
   /* Do outputlist file case */
   if (e->output_list_snapshots) {
     output_list_read_next_time(e->output_list_snapshots, e, "snapshots",
@@ -6507,6 +5023,8 @@ void engine_compute_next_snapshot_time(struct engine *e) {
     time = e->a_first_snapshot;
   else
     time = e->time_first_snapshot;
+
+  int found_snapshot_time = 0;
   while (time < time_end) {
 
     /* Output time on the integer timeline */
@@ -6516,7 +5034,10 @@ void engine_compute_next_snapshot_time(struct engine *e) {
       e->ti_next_snapshot = (time - e->time_begin) / e->time_base;
 
     /* Found it? */
-    if (e->ti_next_snapshot > e->ti_current) break;
+    if (e->ti_next_snapshot > e->ti_current) {
+      found_snapshot_time = 1;
+      break;
+    }
 
     if (e->policy & engine_policy_cosmology)
       time *= e->delta_time_snapshot;
@@ -6525,7 +5046,7 @@ void engine_compute_next_snapshot_time(struct engine *e) {
   }
 
   /* Deal with last snapshot */
-  if (e->ti_next_snapshot >= max_nr_timesteps) {
+  if (!found_snapshot_time) {
     e->ti_next_snapshot = -1;
     if (e->verbose) message("No further output time.");
   } else {
@@ -6571,6 +5092,8 @@ void engine_compute_next_statistics_time(struct engine *e) {
     time = e->a_first_statistics;
   else
     time = e->time_first_statistics;
+
+  int found_stats_time = 0;
   while (time < time_end) {
 
     /* Output time on the integer timeline */
@@ -6580,7 +5103,10 @@ void engine_compute_next_statistics_time(struct engine *e) {
       e->ti_next_stats = (time - e->time_begin) / e->time_base;
 
     /* Found it? */
-    if (e->ti_next_stats > e->ti_current) break;
+    if (e->ti_next_stats > e->ti_current) {
+      found_stats_time = 1;
+      break;
+    }
 
     if (e->policy & engine_policy_cosmology)
       time *= e->delta_time_statistics;
@@ -6589,7 +5115,7 @@ void engine_compute_next_statistics_time(struct engine *e) {
   }
 
   /* Deal with last statistics */
-  if (e->ti_next_stats >= max_nr_timesteps) {
+  if (!found_stats_time) {
     e->ti_next_stats = -1;
     if (e->verbose) message("No further output time.");
   } else {
@@ -6619,54 +5145,61 @@ void engine_compute_next_statistics_time(struct engine *e) {
 void engine_compute_next_stf_time(struct engine *e) {
   /* Do output_list file case */
   if (e->output_list_stf) {
-    output_list_read_next_time(e->output_list_stf, e, "stf", &e->ti_nextSTF);
+    output_list_read_next_time(e->output_list_stf, e, "stf", &e->ti_next_stf);
     return;
   }
 
   /* Find upper-bound on last output */
   double time_end;
   if (e->policy & engine_policy_cosmology)
-    time_end = e->cosmology->a_end * e->deltaTimeSTF;
+    time_end = e->cosmology->a_end * e->delta_time_stf;
   else
-    time_end = e->time_end + e->deltaTimeSTF;
+    time_end = e->time_end + e->delta_time_stf;
 
   /* Find next snasphot above current time */
-  double time = e->timeFirstSTFOutput;
+  double time;
+  if (e->policy & engine_policy_cosmology)
+    time = e->a_first_stf_output;
+  else
+    time = e->time_first_stf_output;
 
+  int found_stf_time = 0;
   while (time < time_end) {
 
     /* Output time on the integer timeline */
     if (e->policy & engine_policy_cosmology)
-      e->ti_nextSTF = log(time / e->cosmology->a_begin) / e->time_base;
+      e->ti_next_stf = log(time / e->cosmology->a_begin) / e->time_base;
     else
-      e->ti_nextSTF = (time - e->time_begin) / e->time_base;
+      e->ti_next_stf = (time - e->time_begin) / e->time_base;
 
     /* Found it? */
-    if (e->ti_nextSTF > e->ti_current) break;
+    if (e->ti_next_stf > e->ti_current) {
+      found_stf_time = 1;
+      break;
+    }
 
     if (e->policy & engine_policy_cosmology)
-      time *= e->deltaTimeSTF;
+      time *= e->delta_time_stf;
     else
-      time += e->deltaTimeSTF;
+      time += e->delta_time_stf;
   }
 
   /* Deal with last snapshot */
-  if (e->ti_nextSTF >= max_nr_timesteps) {
-    e->ti_nextSTF = -1;
+  if (!found_stf_time) {
+    e->ti_next_stf = -1;
     if (e->verbose) message("No further output time.");
   } else {
 
     /* Be nice, talk... */
     if (e->policy & engine_policy_cosmology) {
-      const float next_snapshot_time =
-          exp(e->ti_nextSTF * e->time_base) * e->cosmology->a_begin;
+      const float next_stf_time =
+          exp(e->ti_next_stf * e->time_base) * e->cosmology->a_begin;
       if (e->verbose)
-        message("Next output time set to a=%e.", next_snapshot_time);
+        message("Next VELOCIraptor time set to a=%e.", next_stf_time);
     } else {
-      const float next_snapshot_time =
-          e->ti_nextSTF * e->time_base + e->time_begin;
+      const float next_stf_time = e->ti_next_stf * e->time_base + e->time_begin;
       if (e->verbose)
-        message("Next output time set to t=%e.", next_snapshot_time);
+        message("Next VELOCIraptor time set to t=%e.", next_stf_time);
     }
   }
 }
@@ -6707,14 +5240,14 @@ void engine_init_output_lists(struct engine *e, struct swift_params *params) {
   /* Deal with stf */
   double stf_time_first;
   e->output_list_stf = NULL;
-  output_list_init(&e->output_list_stf, e, "StructureFinding", &e->deltaTimeSTF,
-                   &stf_time_first);
+  output_list_init(&e->output_list_stf, e, "StructureFinding",
+                   &e->delta_time_stf, &stf_time_first);
 
   if (e->output_list_stf) {
     if (e->policy & engine_policy_cosmology)
-      e->a_first_stf = stf_time_first;
+      e->a_first_stf_output = stf_time_first;
     else
-      e->timeFirstSTFOutput = stf_time_first;
+      e->time_first_stf_output = stf_time_first;
   }
 }
 
@@ -6726,12 +5259,16 @@ void engine_init_output_lists(struct engine *e, struct swift_params *params) {
  */
 void engine_recompute_displacement_constraint(struct engine *e) {
 
+  const ticks tic = getticks();
+
   /* Get the cosmological information */
   const struct cosmology *cosmo = e->cosmology;
   const float Om = cosmo->Omega_m;
   const float Ob = cosmo->Omega_b;
-  const float rho_crit = cosmo->critical_density;
+  const float H0 = cosmo->H0;
   const float a = cosmo->a;
+  const float G_newton = e->physical_constants->const_newton_G;
+  const float rho_crit0 = 3.f * H0 * H0 / (8.f * M_PI * G_newton);
 
   /* Start by reducing the minimal mass of each particle type */
   float min_mass[swift_type_count] = {e->s->min_part_mass,
@@ -6743,9 +5280,11 @@ void engine_recompute_displacement_constraint(struct engine *e) {
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that the minimal mass collection worked */
   float min_part_mass_check = FLT_MAX;
-  for (size_t i = 0; i < e->s->nr_parts; ++i)
+  for (size_t i = 0; i < e->s->nr_parts; ++i) {
+    if (e->s->parts[i].time_bin >= num_time_bins) continue;
     min_part_mass_check =
         min(min_part_mass_check, hydro_get_mass(&e->s->parts[i]));
+  }
   if (min_part_mass_check != min_mass[swift_type_gas])
     error("Error collecting minimal mass of gas particles.");
 #endif
@@ -6787,7 +5326,7 @@ void engine_recompute_displacement_constraint(struct engine *e) {
 
   /* Mesh forces smoothing scale */
   float r_s;
-  if ((e->policy & engine_policy_self_gravity) && e->s->periodic == 1)
+  if ((e->policy & engine_policy_self_gravity) && e->s->periodic)
     r_s = e->mesh->r_s;
   else
     r_s = FLT_MAX;
@@ -6801,7 +5340,7 @@ void engine_recompute_displacement_constraint(struct engine *e) {
     const float min_mass_dm = min_mass[1];
 
     /* Inter-particle sepration for the DM */
-    const float d_dm = cbrtf(min_mass_dm / ((Om - Ob) * rho_crit));
+    const float d_dm = cbrtf(min_mass_dm / ((Om - Ob) * rho_crit0));
 
     /* RMS peculiar motion for the DM */
     const float rms_vel_dm = vel_norm_dm / N_dm;
@@ -6817,7 +5356,7 @@ void engine_recompute_displacement_constraint(struct engine *e) {
     const float min_mass_b = min(min_mass[0], min_mass[4]);
 
     /* Inter-particle sepration for the baryons */
-    const float d_b = cbrtf(min_mass_b / (Ob * rho_crit));
+    const float d_b = cbrtf(min_mass_b / (Ob * rho_crit0));
 
     /* RMS peculiar motion for the baryons */
     const float rms_vel_b = vel_norm_b / N_b;
@@ -6834,6 +5373,10 @@ void engine_recompute_displacement_constraint(struct engine *e) {
 
   if (e->verbose)
     message("max_dt_RMS_displacement = %e", e->dt_max_RMS_displacement);
+
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 /**
@@ -6851,20 +5394,16 @@ void engine_clean(struct engine *e) {
   }
   free(e->runners);
   free(e->snapshot_units);
-  if (e->output_list_snapshots) {
-    output_list_clean(e->output_list_snapshots);
-    free(e->output_list_snapshots);
-  }
-  if (e->output_list_stats) {
-    output_list_clean(e->output_list_stats);
-    free(e->output_list_stats);
-  }
-  if (e->output_list_stf) {
-    output_list_clean(e->output_list_stf);
-    free(e->output_list_stf);
-  }
+
+  output_list_clean(&e->output_list_snapshots);
+  output_list_clean(&e->output_list_stats);
+  output_list_clean(&e->output_list_stf);
+
   free(e->links);
-  free(e->cell_loc);
+#if defined(WITH_LOGGER)
+  logger_clean(e->logger);
+  free(e->logger);
+#endif
   scheduler_clean(&e->sched);
   space_clean(e->s);
   threadpool_clean(&e->threadpool);
@@ -6898,12 +5437,14 @@ void engine_struct_dump(struct engine *e, FILE *stream) {
 
   phys_const_struct_dump(e->physical_constants, stream);
   hydro_props_struct_dump(e->hydro_properties, stream);
+  entropy_floor_struct_dump(e->entropy_floor, stream);
   gravity_props_struct_dump(e->gravity_properties, stream);
+  stars_props_struct_dump(e->stars_properties, stream);
   pm_mesh_struct_dump(e->mesh, stream);
   potential_struct_dump(e->external_potential, stream);
   cooling_struct_dump(e->cooling_func, stream);
+  starformation_struct_dump(e->star_formation, stream);
   chemistry_struct_dump(e->chemistry, stream);
-  sourceterms_struct_dump(e->sourceterms, stream);
   parser_struct_dump(e->parameter_file, stream);
   if (e->output_list_snapshots)
     output_list_struct_dump(e->output_list_snapshots, stream);
@@ -6969,11 +5510,22 @@ void engine_struct_restore(struct engine *e, FILE *stream) {
   hydro_props_struct_restore(hydro_properties, stream);
   e->hydro_properties = hydro_properties;
 
+  struct entropy_floor_properties *entropy_floor =
+      (struct entropy_floor_properties *)malloc(
+          sizeof(struct entropy_floor_properties));
+  entropy_floor_struct_restore(entropy_floor, stream);
+  e->entropy_floor = entropy_floor;
+
   struct gravity_props *gravity_properties =
       (struct gravity_props *)malloc(sizeof(struct gravity_props));
   gravity_props_struct_restore(gravity_properties, stream);
   e->gravity_properties = gravity_properties;
 
+  struct stars_props *stars_properties =
+      (struct stars_props *)malloc(sizeof(struct stars_props));
+  stars_props_struct_restore(stars_properties, stream);
+  e->stars_properties = stars_properties;
+
   struct pm_mesh *mesh = (struct pm_mesh *)malloc(sizeof(struct pm_mesh));
   pm_mesh_struct_restore(mesh, stream);
   e->mesh = mesh;
@@ -6986,20 +5538,20 @@ void engine_struct_restore(struct engine *e, FILE *stream) {
   struct cooling_function_data *cooling_func =
       (struct cooling_function_data *)malloc(
           sizeof(struct cooling_function_data));
-  cooling_struct_restore(cooling_func, stream);
+  cooling_struct_restore(cooling_func, stream, e->cosmology);
   e->cooling_func = cooling_func;
 
+  struct star_formation *star_formation =
+      (struct star_formation *)malloc(sizeof(struct star_formation));
+  starformation_struct_restore(star_formation, stream);
+  e->star_formation = star_formation;
+
   struct chemistry_global_data *chemistry =
       (struct chemistry_global_data *)malloc(
           sizeof(struct chemistry_global_data));
   chemistry_struct_restore(chemistry, stream);
   e->chemistry = chemistry;
 
-  struct sourceterms *sourceterms =
-      (struct sourceterms *)malloc(sizeof(struct sourceterms));
-  sourceterms_struct_restore(sourceterms, stream);
-  e->sourceterms = sourceterms;
-
   struct swift_params *parameter_file =
       (struct swift_params *)malloc(sizeof(struct swift_params));
   parser_struct_restore(parameter_file, stream);
diff --git a/src/engine.h b/src/engine.h
index 5dfee503f3b71cc9c76a00fc0ba05bc41b1d9c08..95adae81c99e2036f9e519bbea8c2212e22e56db 100644
--- a/src/engine.h
+++ b/src/engine.h
@@ -38,6 +38,7 @@
 #include "clocks.h"
 #include "collectgroup.h"
 #include "cooling_struct.h"
+#include "dump.h"
 #include "gravity_properties.h"
 #include "mesh_gravity.h"
 #include "parser.h"
@@ -45,10 +46,10 @@
 #include "potential.h"
 #include "runner.h"
 #include "scheduler.h"
-#include "sourceterms_struct.h"
 #include "space.h"
 #include "task.h"
 #include "units.h"
+#include "velociraptor_interface.h"
 
 /**
  * @brief The different policies the #engine can follow.
@@ -68,13 +69,16 @@ enum engine_policy {
   engine_policy_cosmology = (1 << 10),
   engine_policy_drift_all = (1 << 11),
   engine_policy_reconstruct_mpoles = (1 << 12),
-  engine_policy_cooling = (1 << 13),
-  engine_policy_sourceterms = (1 << 14),
+  engine_policy_temperature = (1 << 13),
+  engine_policy_cooling = (1 << 14),
   engine_policy_stars = (1 << 15),
-  engine_policy_structure_finding = (1 << 16)
+  engine_policy_structure_finding = (1 << 16),
+  engine_policy_star_formation = (1 << 17),
+  engine_policy_feedback = (1 << 18),
+  engine_policy_limiter = (1 << 19)
 };
-#define engine_maxpolicy 16
-extern const char *engine_policy_names[];
+#define engine_maxpolicy 20
+extern const char *engine_policy_names[engine_maxpolicy + 1];
 
 /**
  * @brief The different unusual events that can take place in a time-step.
@@ -86,17 +90,24 @@ enum engine_step_properties {
   engine_step_prop_repartition = (1 << 2),
   engine_step_prop_statistics = (1 << 3),
   engine_step_prop_snapshot = (1 << 4),
-  engine_step_prop_restarts = (1 << 5)
+  engine_step_prop_restarts = (1 << 5),
+  engine_step_prop_stf = (1 << 6),
+  engine_step_prop_logger_index = (1 << 7)
 };
 
 /* Some constants */
 #define engine_maxproxies 64
 #define engine_tasksreweight 1
 #define engine_parts_size_grow 1.05
+#define engine_max_proxy_centre_frac 0.2
 #define engine_redistribute_alloc_margin 1.2
+#define engine_rebuild_link_alloc_margin 1.2
+#define engine_foreign_alloc_margin 1.05
 #define engine_default_energy_file_name "energy"
 #define engine_default_timesteps_file_name "timesteps"
 #define engine_max_parts_per_ghost 1000
+#define engine_max_sparts_per_ghost 1000
+#define engine_tasks_per_cell_margin 1.2
 
 /**
  * @brief The rank of the engine as a global variable (for messages).
@@ -206,15 +217,27 @@ struct engine {
   /* Total numbers of particles in the system. */
   long long total_nr_parts, total_nr_gparts, total_nr_sparts;
 
+  /* Total numbers of cells (top-level and sub-cells) in the system. */
+  long long total_nr_cells;
+
+  /* Total numbers of tasks in the system. */
+  long long total_nr_tasks;
+
+  /* The total number of inhibited particles in the system. */
+  long long nr_inhibited_parts, nr_inhibited_gparts, nr_inhibited_sparts;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Total number of particles removed from the system since the last rebuild */
+  long long count_inhibited_parts, count_inhibited_gparts,
+      count_inhibited_sparts;
+#endif
+
   /* Total mass in the simulation */
   double total_mass;
 
   /* The internal system of units */
   const struct unit_system *internal_units;
 
-  /* Top-level cell locations for VELOCIraptor. */
-  struct cell_loc *cell_loc;
-
   /* Snapshot information */
   double a_first_snapshot;
   double time_first_snapshot;
@@ -228,25 +251,25 @@ struct engine {
 
   char snapshot_base_name[PARSER_MAX_LINE_SIZE];
   int snapshot_compression;
-  int snapshot_label_first;
-  int snapshot_label_delta;
+  int snapshot_int_time_label_on;
+  int snapshot_invoke_stf;
   struct unit_system *snapshot_units;
   int snapshot_output_count;
 
   /* Structure finding information */
-  int stf_output_freq_format;
-  double a_first_stf;
-  double timeFirstSTFOutput;
-  double deltaTimeSTF;
-  int deltaStepSTF;
+  double a_first_stf_output;
+  double time_first_stf_output;
+  double delta_time_stf;
 
   /* Output_List for the structure finding */
   struct output_list *output_list_stf;
 
   /* Integer time of the next stf output */
-  integertime_t ti_nextSTF;
+  integertime_t ti_next_stf;
 
-  char stfBaseName[PARSER_MAX_LINE_SIZE];
+  char stf_config_file_name[PARSER_MAX_LINE_SIZE];
+  char stf_base_name[PARSER_MAX_LINE_SIZE];
+  int stf_output_count;
 
   /* Statistics information */
   double a_first_statistics;
@@ -279,10 +302,8 @@ struct engine {
   struct proxy *proxies;
   int nr_proxies, *proxy_ind;
 
-#ifdef SWIFT_DEBUG_TASKS
   /* Tic/toc at the start/end of a step. */
   ticks tic_step, toc_step;
-#endif
 
 #ifdef WITH_MPI
   /* CPU time of the last step. */
@@ -305,6 +326,10 @@ struct engine {
   int forcerepart;
   struct repartition *reparttype;
 
+#ifdef WITH_LOGGER
+  struct logger *logger;
+#endif
+
   /* How many steps have we done with the same set of tasks? */
   int tasks_age;
 
@@ -313,8 +338,13 @@ struct engine {
   size_t nr_links, size_links;
 
   /* Average number of tasks per cell. Used to estimate the sizes
-   * of the various task arrays. */
-  size_t tasks_per_cell;
+   * of the various task arrays. Also the maximum from all ranks. */
+  float tasks_per_cell;
+  float tasks_per_cell_max;
+
+  /* Average number of links per task. This number is used before
+     the creation of communication tasks so needs to be large enough. */
+  size_t links_per_tasks;
 
   /* Are we talkative ? */
   int verbose;
@@ -326,7 +356,13 @@ struct engine {
   struct cosmology *cosmology;
 
   /* Properties of the hydro scheme */
-  const struct hydro_props *hydro_properties;
+  struct hydro_props *hydro_properties;
+
+  /* Properties of the entropy floor */
+  const struct entropy_floor_properties *entropy_floor;
+
+  /* Properties of the star model */
+  const struct stars_props *stars_properties;
 
   /* Properties of the self-gravity scheme */
   struct gravity_props *gravity_properties;
@@ -338,14 +374,14 @@ struct engine {
   const struct external_potential *external_potential;
 
   /* Properties of the cooling scheme */
-  const struct cooling_function_data *cooling_func;
+  struct cooling_function_data *cooling_func;
+
+  /* Properties of the starformation law */
+  const struct star_formation *star_formation;
 
   /* Properties of the chemistry model */
   const struct chemistry_global_data *chemistry;
 
-  /* Properties of source terms */
-  struct sourceterms *sourceterms;
-
   /* The (parsed) parameter file */
   struct swift_params *parameter_file;
 
@@ -375,7 +411,7 @@ struct engine {
   int restart_max_tasks;
 };
 
-/* Function prototypes. */
+/* Function prototypes, engine.c. */
 void engine_addlink(struct engine *e, struct link **l, struct task *t);
 void engine_barrier(struct engine *e);
 void engine_compute_next_snapshot_time(struct engine *e);
@@ -383,9 +419,10 @@ void engine_compute_next_stf_time(struct engine *e);
 void engine_compute_next_statistics_time(struct engine *e);
 void engine_recompute_displacement_constraint(struct engine *e);
 void engine_unskip(struct engine *e);
-void engine_drift_all(struct engine *e);
+void engine_drift_all(struct engine *e, const int drift_mpoles);
 void engine_drift_top_multipoles(struct engine *e);
 void engine_reconstruct_multipoles(struct engine *e);
+void engine_allocate_foreign_particles(struct engine *e);
 void engine_print_stats(struct engine *e);
 void engine_check_for_dumps(struct engine *e);
 void engine_dump_snapshot(struct engine *e);
@@ -395,28 +432,30 @@ void engine_init(struct engine *e, struct space *s, struct swift_params *params,
                  int policy, int verbose, struct repartition *reparttype,
                  const struct unit_system *internal_units,
                  const struct phys_const *physical_constants,
-                 struct cosmology *cosmo, const struct hydro_props *hydro,
-                 struct gravity_props *gravity, struct pm_mesh *mesh,
+                 struct cosmology *cosmo, struct hydro_props *hydro,
+                 const struct entropy_floor_properties *entropy_floor,
+                 struct gravity_props *gravity, const struct stars_props *stars,
+                 struct pm_mesh *mesh,
                  const struct external_potential *potential,
-                 const struct cooling_function_data *cooling_func,
-                 const struct chemistry_global_data *chemistry,
-                 struct sourceterms *sourceterms);
+                 struct cooling_function_data *cooling_func,
+                 const struct star_formation *starform,
+                 const struct chemistry_global_data *chemistry);
 void engine_config(int restart, struct engine *e, struct swift_params *params,
                    int nr_nodes, int nodeID, int nr_threads, int with_aff,
                    int verbose, const char *restart_file);
+void engine_dump_index(struct engine *e);
 void engine_launch(struct engine *e);
 void engine_prepare(struct engine *e);
 void engine_init_particles(struct engine *e, int flag_entropy_ICs,
                            int clean_h_values);
 void engine_step(struct engine *e);
-void engine_maketasks(struct engine *e);
 void engine_split(struct engine *e, struct partition *initial_partition);
-void engine_exchange_strays(struct engine *e, size_t offset_parts,
-                            int *ind_part, size_t *Npart, size_t offset_gparts,
-                            int *ind_gpart, size_t *Ngpart,
-                            size_t offset_sparts, int *ind_spart,
-                            size_t *Nspart);
-void engine_rebuild(struct engine *e, int clean_h_values);
+void engine_exchange_strays(struct engine *e, const size_t offset_parts,
+                            const int *ind_part, size_t *Npart,
+                            const size_t offset_gparts, const int *ind_gpart,
+                            size_t *Ngpart, const size_t offset_sparts,
+                            const int *ind_spart, size_t *Nspart);
+void engine_rebuild(struct engine *e, int redistributed, int clean_h_values);
 void engine_repartition(struct engine *e);
 void engine_repartition_trigger(struct engine *e);
 void engine_makeproxies(struct engine *e);
@@ -426,7 +465,13 @@ int engine_is_done(struct engine *e);
 void engine_pin(void);
 void engine_unpin(void);
 void engine_clean(struct engine *e);
-int engine_estimate_nr_tasks(struct engine *e);
+int engine_estimate_nr_tasks(const struct engine *e);
+
+/* Function prototypes, engine_maketasks.c. */
+void engine_maketasks(struct engine *e);
+
+/* Function prototypes, engine_marktasks.c. */
+int engine_marktasks(struct engine *e);
 
 #ifdef HAVE_SETAFFINITY
 cpu_set_t *engine_entry_affinity(void);
diff --git a/src/engine_drift.c b/src/engine_drift.c
new file mode 100644
index 0000000000000000000000000000000000000000..1b0711619d68da02753f307190ca3a0624feecce
--- /dev/null
+++ b/src/engine_drift.c
@@ -0,0 +1,350 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
+ *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
+ *                    Angus Lepper (angus.lepper@ed.ac.uk)
+ *               2016 John A. Regan (john.a.regan@durham.ac.uk)
+ *                    Tom Theuns (tom.theuns@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* This object's header. */
+#include "engine.h"
+
+/**
+ * @brief Mapper function to drift *all* the #part to the current time.
+ *
+ * @param map_data An array of #cell%s.
+ * @param num_elements Chunk size.
+ * @param extra_data Pointer to an #engine.
+ */
+void engine_do_drift_all_part_mapper(void *map_data, int num_elements,
+                                     void *extra_data) {
+
+  const struct engine *e = (const struct engine *)extra_data;
+  const int restarting = e->restarting;
+  struct space *s = e->s;
+  struct cell *cells_top;
+  int *local_cells_top;
+
+  if (restarting) {
+
+    /* When restarting, we loop over all top-level cells */
+    cells_top = (struct cell *)map_data;
+    local_cells_top = NULL;
+
+  } else {
+
+    /* In any other case, we use the list of local cells with tasks */
+    cells_top = s->cells_top;
+    local_cells_top = (int *)map_data;
+  }
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    struct cell *c;
+
+    /* When restarting, the list of local cells does not
+       yet exist. We use the raw list of top-level cells instead */
+    if (restarting)
+      c = &cells_top[ind];
+    else
+      c = &cells_top[local_cells_top[ind]];
+
+    if (c->nodeID == e->nodeID) {
+
+      /* Drift all the particles */
+      cell_drift_part(c, e, /* force the drift=*/1);
+    }
+  }
+}
+
+/**
+ * @brief Mapper function to drift *all* the #gpart to the current time.
+ *
+ * @param map_data An array of #cell%s.
+ * @param num_elements Chunk size.
+ * @param extra_data Pointer to an #engine.
+ */
+void engine_do_drift_all_gpart_mapper(void *map_data, int num_elements,
+                                      void *extra_data) {
+
+  const struct engine *e = (const struct engine *)extra_data;
+  const int restarting = e->restarting;
+  struct space *s = e->s;
+  struct cell *cells_top;
+  int *local_cells_top;
+
+  if (restarting) {
+
+    /* When restarting, we loop over all top-level cells */
+    cells_top = (struct cell *)map_data;
+    local_cells_top = NULL;
+
+  } else {
+
+    /* In any other case, we use the list of local cells with tasks */
+    cells_top = s->cells_top;
+    local_cells_top = (int *)map_data;
+  }
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    struct cell *c;
+
+    /* When restarting, the list of local cells does not
+       yet exist. We use the raw list of top-level cells instead */
+    if (restarting)
+      c = &cells_top[ind];
+    else
+      c = &cells_top[local_cells_top[ind]];
+
+    if (c->nodeID == e->nodeID) {
+
+      /* Drift all the particles */
+      cell_drift_gpart(c, e, /* force the drift=*/1);
+    }
+  }
+}
+
+/**
+ * @brief Mapper function to drift *all* the #spart to the current time.
+ *
+ * @param map_data An array of #cell%s.
+ * @param num_elements Chunk size.
+ * @param extra_data Pointer to an #engine.
+ */
+void engine_do_drift_all_spart_mapper(void *map_data, int num_elements,
+                                      void *extra_data) {
+
+  const struct engine *e = (const struct engine *)extra_data;
+  const int restarting = e->restarting;
+  struct space *s = e->s;
+  struct cell *cells_top;
+  int *local_cells_top;
+
+  if (restarting) {
+
+    /* When restarting, we loop over all top-level cells */
+    cells_top = (struct cell *)map_data;
+    local_cells_top = NULL;
+
+  } else {
+
+    /* In any other case, we use the list of local cells with tasks */
+    cells_top = s->cells_top;
+    local_cells_top = (int *)map_data;
+  }
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    struct cell *c;
+
+    /* When restarting, the list of local cells does not
+       yet exist. We use the raw list of top-level cells instead */
+    if (restarting)
+      c = &cells_top[ind];
+    else
+      c = &cells_top[local_cells_top[ind]];
+
+    if (c->nodeID == e->nodeID) {
+
+      /* Drift all the particles */
+      cell_drift_spart(c, e, /* force the drift=*/1);
+    }
+  }
+}
+
+/**
+ * @brief Mapper function to drift *all* the multipoles to the current time.
+ *
+ * @param map_data An array of #cell%s.
+ * @param num_elements Chunk size.
+ * @param extra_data Pointer to an #engine.
+ */
+void engine_do_drift_all_multipole_mapper(void *map_data, int num_elements,
+                                          void *extra_data) {
+
+  const struct engine *e = (const struct engine *)extra_data;
+  const int restarting = e->restarting;
+  struct space *s = e->s;
+  struct cell *cells_top;
+  int *local_cells_with_tasks_top;
+
+  if (restarting) {
+
+    /* When restarting, we loop over all top-level cells */
+    cells_top = (struct cell *)map_data;
+    local_cells_with_tasks_top = NULL;
+
+  } else {
+
+    /* In any other case, we use the list of local cells with tasks */
+    cells_top = s->cells_top;
+    local_cells_with_tasks_top = (int *)map_data;
+  }
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    struct cell *c;
+
+    /* When restarting, the list of local cells does not
+       yet exist. We use the raw list of top-level cells instead */
+    if (restarting)
+      c = &cells_top[ind];
+    else
+      c = &cells_top[local_cells_with_tasks_top[ind]];
+
+    cell_drift_all_multipoles(c, e);
+  }
+}
+
+/**
+ * @brief Drift *all* particles and multipoles at all levels
+ * forward to the current time.
+ *
+ * @param e The #engine.
+ * @param drift_mpoles Do we want to drift all the multipoles as well?
+ */
+void engine_drift_all(struct engine *e, const int drift_mpoles) {
+
+  const ticks tic = getticks();
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (e->nodeID == 0) {
+    if (e->policy & engine_policy_cosmology)
+      message("Drifting all to a=%e",
+              exp(e->ti_current * e->time_base) * e->cosmology->a_begin);
+    else
+      message("Drifting all to t=%e",
+              e->ti_current * e->time_base + e->time_begin);
+  }
+#endif
+
+  if (!e->restarting) {
+
+    /* Normal case: We have a list of local cells with tasks to play with */
+
+    if (e->s->nr_parts > 0) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_part_mapper,
+                     e->s->local_cells_top, e->s->nr_local_cells, sizeof(int),
+                     /* default chunk */ 0, e);
+    }
+    if (e->s->nr_gparts > 0) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_gpart_mapper,
+                     e->s->local_cells_top, e->s->nr_local_cells, sizeof(int),
+                     /* default chunk */ 0, e);
+    }
+    if (e->s->nr_sparts > 0) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_spart_mapper,
+                     e->s->local_cells_top, e->s->nr_local_cells, sizeof(int),
+                     /* default chunk */ 0, e);
+    }
+    if (drift_mpoles && (e->policy & engine_policy_self_gravity)) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_multipole_mapper,
+                     e->s->local_cells_with_tasks_top,
+                     e->s->nr_local_cells_with_tasks, sizeof(int),
+                     /* default chunk */ 0, e);
+    }
+
+  } else {
+
+    /* When restarting, the list of local cells with tasks does not yet
+       exist. We use the raw list of top-level cells instead */
+
+    if (e->s->nr_parts > 0) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_part_mapper,
+                     e->s->cells_top, e->s->nr_cells, sizeof(struct cell),
+                     /* default chunk */ 0, e);
+    }
+    if (e->s->nr_gparts > 0) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_gpart_mapper,
+                     e->s->cells_top, e->s->nr_cells, sizeof(struct cell),
+                     /* default chunk */ 0, e);
+    }
+    if (e->policy & engine_policy_self_gravity) {
+      threadpool_map(&e->threadpool, engine_do_drift_all_multipole_mapper,
+                     e->s->cells_top, e->s->nr_cells, sizeof(struct cell),
+                     /* default chunk */ 0, e);
+    }
+  }
+
+  /* Synchronize particle positions */
+  space_synchronize_particle_positions(e->s);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check that all cells have been drifted to the current time. */
+  space_check_drift_point(
+      e->s, e->ti_current,
+      drift_mpoles && (e->policy & engine_policy_self_gravity));
+  part_verify_links(e->s->parts, e->s->gparts, e->s->sparts, e->s->nr_parts,
+                    e->s->nr_gparts, e->s->nr_sparts, e->verbose);
+#endif
+
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
+
+/**
+ * @brief Mapper function to drift *all* top-level multipoles forward in
+ * time.
+ *
+ * @param map_data An array of #cell%s.
+ * @param num_elements Chunk size.
+ * @param extra_data Pointer to an #engine.
+ */
+void engine_do_drift_top_multipoles_mapper(void *map_data, int num_elements,
+                                           void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  struct cell *cells = (struct cell *)map_data;
+
+  for (int ind = 0; ind < num_elements; ind++) {
+    struct cell *c = &cells[ind];
+    if (c != NULL) {
+
+      /* Drift the multipole at this level only */
+      if (c->grav.ti_old_multipole != e->ti_current) cell_drift_multipole(c, e);
+    }
+  }
+}
+
+/**
+ * @brief Drift *all* top-level multipoles forward to the current time.
+ *
+ * @param e The #engine.
+ */
+void engine_drift_top_multipoles(struct engine *e) {
+
+  const ticks tic = getticks();
+
+  threadpool_map(&e->threadpool, engine_do_drift_top_multipoles_mapper,
+                 e->s->cells_top, e->s->nr_cells, sizeof(struct cell), 0, e);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check that all cells have been drifted to the current time. */
+  space_check_top_multipoles_drift_point(e->s, e->ti_current);
+#endif
+
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
diff --git a/src/engine_maketasks.c b/src/engine_maketasks.c
new file mode 100644
index 0000000000000000000000000000000000000000..d1858f87ff0bfdfee878f2e53b81e100812fd0a5
--- /dev/null
+++ b/src/engine_maketasks.c
@@ -0,0 +1,2491 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
+ *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
+ *                    Angus Lepper (angus.lepper@ed.ac.uk)
+ *               2016 John A. Regan (john.a.regan@durham.ac.uk)
+ *                    Tom Theuns (tom.theuns@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <stdlib.h>
+#include <unistd.h>
+
+/* MPI headers. */
+#ifdef WITH_MPI
+#include <mpi.h>
+#endif
+
+/* Load the profiler header, if needed. */
+#ifdef WITH_PROFILER
+#include <gperftools/profiler.h>
+#endif
+
+/* This object's header. */
+#include "engine.h"
+
+/* Local headers. */
+#include "atomic.h"
+#include "cell.h"
+#include "clocks.h"
+#include "cycle.h"
+#include "debug.h"
+#include "error.h"
+#include "proxy.h"
+#include "timers.h"
+
+/**
+ * @brief Add send tasks for the gravity pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param ci The sending #cell.
+ * @param cj Dummy cell containing the nodeID of the receiving node.
+ * @param t_grav The send_grav #task, if it has already been created.
+ */
+void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
+                                  struct cell *cj, struct task *t_grav) {
+
+#ifdef WITH_MPI
+  struct link *l = NULL;
+  struct scheduler *s = &e->sched;
+  const int nodeID = cj->nodeID;
+
+  /* Check if any of the gravity tasks are for the target node. */
+  for (l = ci->grav.grav; l != NULL; l = l->next)
+    if (l->t->ci->nodeID == nodeID ||
+        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+      break;
+
+  /* If so, attach send tasks. */
+  if (l != NULL) {
+
+    /* Create the tasks and their dependencies? */
+    if (t_grav == NULL) {
+
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
+
+      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
+                                 ci->mpi.tag, 0, ci, cj);
+
+      /* The sends should unlock the down pass. */
+      scheduler_addunlock(s, t_grav, ci->grav.super->grav.down);
+
+      /* Drift before you send */
+      scheduler_addunlock(s, ci->grav.super->grav.drift, t_grav);
+    }
+
+    /* Add them to the local cell. */
+    engine_addlink(e, &ci->mpi.grav.send, t_grav);
+  }
+
+  /* Recurse? */
+  if (ci->split)
+    for (int k = 0; k < 8; k++)
+      if (ci->progeny[k] != NULL)
+        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add send tasks for the hydro pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param ci The sending #cell.
+ * @param cj Dummy cell containing the nodeID of the receiving node.
+ * @param t_xv The send_xv #task, if it has already been created.
+ * @param t_rho The send_rho #task, if it has already been created.
+ * @param t_gradient The send_gradient #task, if already created.
+ */
+void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
+                                struct cell *cj, struct task *t_xv,
+                                struct task *t_rho, struct task *t_gradient) {
+
+#ifdef WITH_MPI
+  struct link *l = NULL;
+  struct scheduler *s = &e->sched;
+  const int nodeID = cj->nodeID;
+
+  /* Check if any of the density tasks are for the target node. */
+  for (l = ci->hydro.density; l != NULL; l = l->next)
+    if (l->t->ci->nodeID == nodeID ||
+        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+      break;
+
+  /* If so, attach send tasks. */
+  if (l != NULL) {
+
+    /* Create the tasks and their dependencies? */
+    if (t_xv == NULL) {
+
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
+
+      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, ci->mpi.tag,
+                               0, ci, cj);
+      t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
+                                ci->mpi.tag, 0, ci, cj);
+
+#ifdef EXTRA_HYDRO_LOOP
+      t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
+                                     ci->mpi.tag, 0, ci, cj);
+#endif
+
+#ifdef EXTRA_HYDRO_LOOP
+
+      scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);
+
+      scheduler_addunlock(s, ci->hydro.super->hydro.extra_ghost, t_gradient);
+
+      /* The send_rho task should unlock the super_hydro-cell's extra_ghost
+       * task. */
+      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.extra_ghost);
+
+      /* The send_rho task depends on the cell's ghost task. */
+      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);
+
+      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
+      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);
+
+#else
+      /* The send_rho task should unlock the super_hydro-cell's end_force task. */
+      scheduler_addunlock(s, t_rho, ci->hydro.super->hydro.end_force);
+
+      /* The send_rho task depends on the cell's ghost task. */
+      scheduler_addunlock(s, ci->hydro.super->hydro.ghost_out, t_rho);
+
+      /* The send_xv task should unlock the super_hydro-cell's ghost task. */
+      scheduler_addunlock(s, t_xv, ci->hydro.super->hydro.ghost_in);
+
+#endif
+
+      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_rho);
+
+      /* Drift before you send */
+      scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
+    }
+
+    /* Add them to the local cell. */
+    engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
+    engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho);
+#ifdef EXTRA_HYDRO_LOOP
+    engine_addlink(e, &ci->mpi.hydro.send_gradient, t_gradient);
+#endif
+  }
+
+  /* Recurse? */
+  if (ci->split)
+    for (int k = 0; k < 8; k++)
+      if (ci->progeny[k] != NULL)
+        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
+                                   t_gradient);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add send tasks for the stars pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param ci The sending #cell.
+ * @param cj Dummy cell containing the nodeID of the receiving node.
+ * @param t_feedback The send_feed #task, if it has already been created.
+ */
+void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
+                                struct cell *cj, struct task *t_feedback) {
+
+#ifdef WITH_MPI
+
+  struct link *l = NULL;
+  struct scheduler *s = &e->sched;
+  const int nodeID = cj->nodeID;
+
+  /* Check if any of the density tasks are for the target node. */
+  for (l = ci->stars.density; l != NULL; l = l->next)
+    if (l->t->ci->nodeID == nodeID ||
+        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+      break;
+
+  /* If so, attach send tasks. */
+  if (l != NULL) {
+
+    if (t_feedback == NULL) {
+
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
+
+      /* Create the tasks and their dependencies? */
+      t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
+                                     ci->mpi.tag, 0, ci, cj);
+
+      /* The send_stars task should unlock the super-cell's stars_out task. */
+      scheduler_addunlock(s, t_feedback, ci->hydro.super->stars.stars_out);
+
+      /* Ghost before you send */
+      scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);
+
+      /* Drift before you send */
+      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);
+    }
+
+    engine_addlink(e, &ci->mpi.stars.send, t_feedback);
+  }
+
+  /* Recurse? */
+  if (ci->split)
+    for (int k = 0; k < 8; k++)
+      if (ci->progeny[k] != NULL)
+        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add send tasks for the time-step to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param ci The sending #cell.
+ * @param cj Dummy cell containing the nodeID of the receiving node.
+ * @param t_ti The send_ti #task, if it has already been created.
+ * @param t_limiter The send_limiter #task, if already created.
+ * @param with_limiter Are we running with the time-step limiter?
+ */
+void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
+                                   struct cell *cj, struct task *t_ti,
+                                   struct task *t_limiter,
+                                   const int with_limiter) {
+
+#ifdef WITH_MPI
+  struct link *l = NULL;
+  struct scheduler *s = &e->sched;
+  const int nodeID = cj->nodeID;
+
+  /* Check if any of the gravity tasks are for the target node. */
+  for (l = ci->grav.grav; l != NULL; l = l->next)
+    if (l->t->ci->nodeID == nodeID ||
+        (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+      break;
+
+  /* Check whether instead any of the hydro tasks are for the target node. */
+  if (l == NULL)
+    for (l = ci->hydro.density; l != NULL; l = l->next)
+      if (l->t->ci->nodeID == nodeID ||
+          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+        break;
+
+  if (l == NULL)
+    for (l = ci->stars.density; l != NULL; l = l->next)
+      if (l->t->ci->nodeID == nodeID ||
+          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+        break;
+
+  /* If found anything, attach send tasks. */
+  if (l != NULL) {
+
+    /* Create the tasks and their dependencies? */
+    if (t_ti == NULL) {
+
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
+
+      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
+                               ci->mpi.tag, 0, ci, cj);
+
+      if (with_limiter)
+        t_limiter = scheduler_addtask(s, task_type_send, task_subtype_limiter,
+                                      ci->mpi.tag, 0, ci, cj);
+
+      /* The super-cell's timestep task should unlock the send_ti task. */
+      scheduler_addunlock(s, ci->super->timestep, t_ti);
+      if (with_limiter) scheduler_addunlock(s, t_limiter, ci->super->timestep);
+      if (with_limiter)
+        scheduler_addunlock(s, t_limiter, ci->super->timestep_limiter);
+      if (with_limiter) scheduler_addunlock(s, ci->super->kick2, t_limiter);
+      if (with_limiter)
+        scheduler_addunlock(s, ci->super->timestep_limiter, t_ti);
+    }
+
+    /* Add them to the local cell. */
+    engine_addlink(e, &ci->mpi.send_ti, t_ti);
+    if (with_limiter) engine_addlink(e, &ci->mpi.limiter.send, t_limiter);
+  }
+
+  /* Recurse? */
+  if (ci->split)
+    for (int k = 0; k < 8; k++)
+      if (ci->progeny[k] != NULL)
+        engine_addtasks_send_timestep(e, ci->progeny[k], cj, t_ti, t_limiter,
+                                      with_limiter);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add recv tasks for hydro pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param c The foreign #cell.
+ * @param t_xv The recv_xv #task, if it has already been created.
+ * @param t_rho The recv_rho #task, if it has already been created.
+ * @param t_gradient The recv_gradient #task, if it has already been created.
+ */
+void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
+                                struct task *t_xv, struct task *t_rho,
+                                struct task *t_gradient) {
+
+#ifdef WITH_MPI
+  struct scheduler *s = &e->sched;
+
+  /* Have we reached a level where there are any hydro tasks ? */
+  if (t_xv == NULL && c->hydro.density != NULL) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /* Make sure this cell has a valid tag. */
+    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
+#endif  // SWIFT_DEBUG_CHECKS
+
+    /* Create the tasks. */
+    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag, 0,
+                             c, NULL);
+    t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho, c->mpi.tag,
+                              0, c, NULL);
+#ifdef EXTRA_HYDRO_LOOP
+    t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
+                                   c->mpi.tag, 0, c, NULL);
+#endif
+  }
+
+  c->mpi.hydro.recv_xv = t_xv;
+  c->mpi.hydro.recv_rho = t_rho;
+  c->mpi.hydro.recv_gradient = t_gradient;
+
+  /* Add dependencies. */
+  if (c->hydro.sorts != NULL) {
+    scheduler_addunlock(s, t_xv, c->hydro.sorts);
+    scheduler_addunlock(s, c->hydro.sorts, t_rho);
+  }
+
+  for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_xv, l->t);
+    scheduler_addunlock(s, l->t, t_rho);
+  }
+#ifdef EXTRA_HYDRO_LOOP
+  for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_rho, l->t);
+    scheduler_addunlock(s, l->t, t_gradient);
+  }
+  for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_gradient, l->t);
+  }
+#else
+  for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_rho, l->t);
+  }
+#endif
+
+  /* Make sure the density has been computed before the stars compute theirs. */
+  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_rho, l->t);
+  }
+
+  /* Recurse? */
+  if (c->split)
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add recv tasks for stars pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param c The foreign #cell.
+ * @param t_feedback The recv_feed #task, if it has already been created.
+ */
+void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
+                                struct task *t_feedback) {
+
+#ifdef WITH_MPI
+  struct scheduler *s = &e->sched;
+
+  /* Have we reached a level where there are any stars tasks ? */
+  if (t_feedback == NULL && c->stars.density != NULL) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /* Make sure this cell has a valid tag. */
+    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
+#endif  // SWIFT_DEBUG_CHECKS
+
+    /* Create the tasks. */
+    t_feedback = scheduler_addtask(s, task_type_recv, task_subtype_spart,
+                                   c->mpi.tag, 0, c, NULL);
+  }
+
+  c->mpi.stars.recv = t_feedback;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID == e->nodeID) error("Local cell!");
+#endif
+  if (c->stars.sorts != NULL)
+    scheduler_addunlock(s, t_feedback, c->stars.sorts);
+
+  for (struct link *l = c->stars.density; l != NULL; l = l->next) {
+    scheduler_addunlock(s, l->t, t_feedback);
+  }
+
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_feedback, l->t);
+  }
+
+  /* Recurse? */
+  if (c->split)
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_addtasks_recv_stars(e, c->progeny[k], t_feedback);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add recv tasks for gravity pairs to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param c The foreign #cell.
+ * @param t_grav The recv_gpart #task, if it has already been created.
+ */
+void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
+                                  struct task *t_grav) {
+
+#ifdef WITH_MPI
+  struct scheduler *s = &e->sched;
+
+  /* Have we reached a level where there are any gravity tasks ? */
+  if (t_grav == NULL && c->grav.grav != NULL) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /* Make sure this cell has a valid tag. */
+    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
+#endif  // SWIFT_DEBUG_CHECKS
+
+    /* Create the tasks. */
+    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
+                               c->mpi.tag, 0, c, NULL);
+  }
+
+  c->mpi.grav.recv = t_grav;
+
+  for (struct link *l = c->grav.grav; l != NULL; l = l->next)
+    scheduler_addunlock(s, t_grav, l->t);
+
+  /* Recurse? */
+  if (c->split)
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Add recv tasks for the time-step to a hierarchy of cells.
+ *
+ * @param e The #engine.
+ * @param c The foreign #cell.
+ * @param t_ti The recv_ti #task, if already been created.
+ * @param t_limiter The recv_limiter #task, if already created.
+ * @param with_limiter Are we running with the time-step limiter?
+ */
+void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
+                                   struct task *t_ti, struct task *t_limiter,
+                                   const int with_limiter) {
+
+#ifdef WITH_MPI
+  struct scheduler *s = &e->sched;
+
+  /* Have we reached a level where there are any self/pair tasks ? */
+  if (t_ti == NULL && (c->grav.grav != NULL || c->hydro.density != NULL)) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+    /* Make sure this cell has a valid tag. */
+    if (c->mpi.tag < 0) error("Trying to receive from untagged cell.");
+#endif  // SWIFT_DEBUG_CHECKS
+
+    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend, c->mpi.tag,
+                             0, c, NULL);
+
+    if (with_limiter)
+      t_limiter = scheduler_addtask(s, task_type_recv, task_subtype_limiter,
+                                    c->mpi.tag, 0, c, NULL);
+  }
+
+  c->mpi.recv_ti = t_ti;
+
+  for (struct link *l = c->grav.grav; l != NULL; l = l->next) {
+    scheduler_addunlock(s, l->t, t_ti);
+  }
+
+  if (with_limiter) {
+
+    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
+      scheduler_addunlock(s, l->t, t_limiter);
+    }
+
+    for (struct link *l = c->hydro.limiter; l != NULL; l = l->next) {
+      scheduler_addunlock(s, t_limiter, l->t);
+      scheduler_addunlock(s, l->t, t_ti);
+    }
+
+  } else {
+
+    for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
+      scheduler_addunlock(s, l->t, t_ti);
+    }
+  }
+
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next)
+    scheduler_addunlock(s, l->t, t_ti);
+
+  /* Recurse? */
+  if (c->split)
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_addtasks_recv_timestep(e, c->progeny[k], t_ti, t_limiter,
+                                      with_limiter);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Generate the common hierarchical tasks for a hierarchy of cells -
+ * i.e. all the O(Npart) tasks -- timestep version
+ *
+ * Tasks are only created here. The dependencies will be added later on.
+ *
+ * Note that there is no need to recurse below the super-cell. Note also
+ * that we only add tasks if the relevant particles are present in the cell.
+ *
+ * @param e The #engine.
+ * @param c The #cell.
+ */
+void engine_make_hierarchical_tasks_common(struct engine *e, struct cell *c) {
+
+  struct scheduler *s = &e->sched;
+  const int with_limiter = (e->policy & engine_policy_limiter);
+
+  /* Are we in a super-cell ? */
+  if (c->super == c) {
+
+    /* Local tasks only... */
+    if (c->nodeID == e->nodeID) {
+
+      /* Add the two half kicks */
+      c->kick1 = scheduler_addtask(s, task_type_kick1, task_subtype_none, 0, 0,
+                                   c, NULL);
+
+#if defined(WITH_LOGGER)
+      c->logger = scheduler_addtask(s, task_type_logger, task_subtype_none, 0,
+                                    0, c, NULL);
+#endif
+
+      c->kick2 = scheduler_addtask(s, task_type_kick2, task_subtype_none, 0, 0,
+                                   c, NULL);
+
+      /* Add the time-step calculation task and its dependency */
+      c->timestep = scheduler_addtask(s, task_type_timestep, task_subtype_none,
+                                      0, 0, c, NULL);
+
+      scheduler_addunlock(s, c->kick2, c->timestep);
+      scheduler_addunlock(s, c->timestep, c->kick1);
+
+      /* Time-step limiting */
+      if (with_limiter) {
+        c->timestep_limiter = scheduler_addtask(
+            s, task_type_timestep_limiter, task_subtype_none, 0, 0, c, NULL);
+
+        /* Make sure it is not run before kick2 */
+        scheduler_addunlock(s, c->timestep, c->timestep_limiter);
+        scheduler_addunlock(s, c->timestep_limiter, c->kick1);
+      }
+
+#if defined(WITH_LOGGER)
+      scheduler_addunlock(s, c->kick1, c->logger);
+#endif
+    }
+  } else { /* We are above the super-cell so need to go deeper */
+
+    /* Recurse. */
+    if (c->split)
+      for (int k = 0; k < 8; k++)
+        if (c->progeny[k] != NULL)
+          engine_make_hierarchical_tasks_common(e, c->progeny[k]);
+  }
+}
+
+/**
+ * @brief Generate the gravity hierarchical tasks for a hierarchy of cells -
+ * i.e. all the O(Npart) tasks -- gravity version
+ *
+ * Tasks are only created here. The dependencies will be added later on.
+ *
+ * Note that there is no need to recurse below the super-cell. Note also
+ * that we only add tasks if the relevant particles are present in the cell.
+ *
+ * @param e The #engine.
+ * @param c The #cell.
+ */
+void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {
+
+  struct scheduler *s = &e->sched;
+  const int periodic = e->s->periodic;
+  const int is_self_gravity = (e->policy & engine_policy_self_gravity);
+
+  /* Are we in a super-cell ? */
+  if (c->grav.super == c) {
+
+    /* Local tasks only... */
+    if (c->nodeID == e->nodeID) {
+
+      c->grav.drift = scheduler_addtask(s, task_type_drift_gpart,
+                                        task_subtype_none, 0, 0, c, NULL);
+
+      c->grav.end_force = scheduler_addtask(s, task_type_end_grav_force,
+                                            task_subtype_none, 0, 0, c, NULL);
+
+      scheduler_addunlock(s, c->grav.end_force, c->super->kick2);
+
+      if (is_self_gravity) {
+
+        /* Initialisation of the multipoles */
+        c->grav.init = scheduler_addtask(s, task_type_init_grav,
+                                         task_subtype_none, 0, 0, c, NULL);
+
+        /* Gravity non-neighbouring pm calculations */
+        c->grav.long_range = scheduler_addtask(
+            s, task_type_grav_long_range, task_subtype_none, 0, 0, c, NULL);
+
+        /* Gravity recursive down-pass */
+        c->grav.down = scheduler_addtask(s, task_type_grav_down,
+                                         task_subtype_none, 0, 0, c, NULL);
+
+        /* Implicit tasks for the up and down passes */
+        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
+                                              task_subtype_none, 0, 1, c, NULL);
+        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
+                                             task_subtype_none, 0, 1, c, NULL);
+        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
+                                            task_subtype_none, 0, 1, c, NULL);
+
+        /* Gravity mesh force propagation */
+        if (periodic)
+          c->grav.mesh = scheduler_addtask(s, task_type_grav_mesh,
+                                           task_subtype_none, 0, 0, c, NULL);
+
+        if (periodic) scheduler_addunlock(s, c->grav.drift, c->grav.mesh);
+        if (periodic) scheduler_addunlock(s, c->grav.mesh, c->grav.down);
+        scheduler_addunlock(s, c->grav.init, c->grav.long_range);
+        scheduler_addunlock(s, c->grav.long_range, c->grav.down);
+        scheduler_addunlock(s, c->grav.down, c->grav.super->grav.end_force);
+
+        /* Link in the implicit tasks */
+        scheduler_addunlock(s, c->grav.init, c->grav.init_out);
+        scheduler_addunlock(s, c->grav.drift, c->grav.drift_out);
+        scheduler_addunlock(s, c->grav.down_in, c->grav.down);
+      }
+    }
+  }
+
+  /* We are below the super-cell but not below the maximal splitting depth */
+  else if ((c->grav.super != NULL) &&
+           ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {
+
+    /* Local tasks only... */
+    if (c->nodeID == e->nodeID) {
+
+      if (is_self_gravity) {
+
+        c->grav.drift_out = scheduler_addtask(s, task_type_drift_gpart_out,
+                                              task_subtype_none, 0, 1, c, NULL);
+
+        c->grav.init_out = scheduler_addtask(s, task_type_init_grav_out,
+                                             task_subtype_none, 0, 1, c, NULL);
+
+        c->grav.down_in = scheduler_addtask(s, task_type_grav_down_in,
+                                            task_subtype_none, 0, 1, c, NULL);
+
+        scheduler_addunlock(s, c->parent->grav.init_out, c->grav.init_out);
+        scheduler_addunlock(s, c->parent->grav.drift_out, c->grav.drift_out);
+        scheduler_addunlock(s, c->grav.down_in, c->parent->grav.down_in);
+      }
+    }
+  }
+
+  /* Recurse but not below the maximal splitting depth */
+  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav))
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_make_hierarchical_tasks_gravity(e, c->progeny[k]);
+}
+
+/**
+ * @brief Recursively add non-implicit ghost tasks to a cell hierarchy.
+ */
+void engine_add_ghosts(struct engine *e, struct cell *c, struct task *ghost_in,
+                       struct task *ghost_out) {
+
+  /* Abort as there are no hydro particles here? */
+  if (c->hydro.count_total == 0) return;
+
+  /* If we have reached the leaf OR have too few particles to play with */
+  if (!c->split || c->hydro.count_total < engine_max_parts_per_ghost) {
+
+    /* Add the ghost task and its dependencies */
+    struct scheduler *s = &e->sched;
+    c->hydro.ghost =
+        scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0, c, NULL);
+    scheduler_addunlock(s, ghost_in, c->hydro.ghost);
+    scheduler_addunlock(s, c->hydro.ghost, ghost_out);
+
+  } else {
+    /* Keep recursing */
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_add_ghosts(e, c->progeny[k], ghost_in, ghost_out);
+  }
+}
+
+/**
+ * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
+ * i.e. all the O(Npart) tasks -- hydro version
+ *
+ * Tasks are only created here. The dependencies will be added later on.
+ *
+ * Note that there is no need to recurse below the super-cell. Note also
+ * that we only add tasks if the relevant particles are present in the cell.
+ *
+ * @param e The #engine.
+ * @param c The #cell.
+ */
+void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c) {
+
+  struct scheduler *s = &e->sched;
+  const int with_stars = (e->policy & engine_policy_stars);
+  const int with_feedback = (e->policy & engine_policy_feedback);
+  const int with_cooling = (e->policy & engine_policy_cooling);
+  const int with_star_formation = (e->policy & engine_policy_star_formation);
+
+  /* Are we in a super-cell ? */
+  if (c->hydro.super == c) {
+
+    /* Add the sort task. */
+    c->hydro.sorts =
+        scheduler_addtask(s, task_type_sort, task_subtype_none, 0, 0, c, NULL);
+
+    if (with_feedback) {
+      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
+                                         task_subtype_none, 0, 0, c, NULL);
+    }
+
+    /* Local tasks only... */
+    if (c->nodeID == e->nodeID) {
+
+      /* Add the drift task. */
+      c->hydro.drift = scheduler_addtask(s, task_type_drift_part,
+                                         task_subtype_none, 0, 0, c, NULL);
+
+      /* Add the task finishing the force calculation */
+      c->hydro.end_force = scheduler_addtask(s, task_type_end_hydro_force,
+                                             task_subtype_none, 0, 0, c, NULL);
+
+      /* Generate the ghost tasks. */
+      c->hydro.ghost_in =
+          scheduler_addtask(s, task_type_ghost_in, task_subtype_none, 0,
+                            /* implicit = */ 1, c, NULL);
+      c->hydro.ghost_out =
+          scheduler_addtask(s, task_type_ghost_out, task_subtype_none, 0,
+                            /* implicit = */ 1, c, NULL);
+      engine_add_ghosts(e, c, c->hydro.ghost_in, c->hydro.ghost_out);
+
+      /* Generate the extra ghost task. */
+#ifdef EXTRA_HYDRO_LOOP
+      c->hydro.extra_ghost = scheduler_addtask(
+          s, task_type_extra_ghost, task_subtype_none, 0, 0, c, NULL);
+#endif
+
+      /* Stars */
+      if (with_stars) {
+        c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
+                                           task_subtype_none, 0, 0, c, NULL);
+        scheduler_addunlock(s, c->stars.drift, c->super->kick2);
+      }
+
+      /* Subgrid tasks: cooling */
+      if (with_cooling) {
+
+        c->hydro.cooling = scheduler_addtask(s, task_type_cooling,
+                                             task_subtype_none, 0, 0, c, NULL);
+
+        scheduler_addunlock(s, c->hydro.end_force, c->hydro.cooling);
+        scheduler_addunlock(s, c->hydro.cooling, c->super->kick2);
+
+      } else {
+        scheduler_addunlock(s, c->hydro.end_force, c->super->kick2);
+      }
+
+      /* Subgrid tasks: star formation */
+      if (with_star_formation) {
+
+        c->hydro.star_formation = scheduler_addtask(
+            s, task_type_star_formation, task_subtype_none, 0, 0, c, NULL);
+
+        scheduler_addunlock(s, c->super->kick2, c->hydro.star_formation);
+        scheduler_addunlock(s, c->hydro.star_formation, c->super->timestep);
+      }
+
+      /* Subgrid tasks: feedback */
+      if (with_feedback) {
+
+        c->stars.stars_in =
+            scheduler_addtask(s, task_type_stars_in, task_subtype_none, 0,
+                              /* implicit = */ 1, c, NULL);
+
+        c->stars.stars_out =
+            scheduler_addtask(s, task_type_stars_out, task_subtype_none, 0,
+                              /* implicit = */ 1, c, NULL);
+
+        c->stars.ghost = scheduler_addtask(s, task_type_stars_ghost,
+                                           task_subtype_none, 0, 0, c, NULL);
+
+        scheduler_addunlock(s, c->super->kick2, c->stars.stars_in);
+        scheduler_addunlock(s, c->stars.stars_out, c->super->timestep);
+
+        if (with_star_formation) {
+          scheduler_addunlock(s, c->hydro.star_formation, c->stars.stars_in);
+        }
+      }
+    }
+  } else { /* We are above the super-cell so need to go deeper */
+
+    /* Recurse. */
+    if (c->split)
+      for (int k = 0; k < 8; k++)
+        if (c->progeny[k] != NULL)
+          engine_make_hierarchical_tasks_hydro(e, c->progeny[k]);
+  }
+}
+
+/**
+ * @brief Threadpool mapper building the hierarchical tasks for a range
+ *        of top-level cells.
+ *
+ * @param map_data Pointer to an array of #cell.
+ * @param num_elements Number of cells in the array.
+ * @param extra_data Pointer to the #engine.
+ */
+void engine_make_hierarchical_tasks_mapper(void *map_data, int num_elements,
+                                           void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  struct cell *cells = (struct cell *)map_data;
+
+  /* Which physics modules are switched on? */
+  const int is_with_hydro = (e->policy & engine_policy_hydro);
+  const int is_with_self_grav = (e->policy & engine_policy_self_gravity);
+  const int is_with_ext_grav = (e->policy & engine_policy_external_gravity);
+
+  for (int k = 0; k < num_elements; k++) {
+
+    struct cell *c = &cells[k];
+
+    /* Time integration and other common tasks first. */
+    engine_make_hierarchical_tasks_common(e, c);
+
+    /* Then the SPH tasks, if hydrodynamics is on. */
+    if (is_with_hydro) engine_make_hierarchical_tasks_hydro(e, c);
+
+    /* And finally the gravity ones (self and/or external potential). */
+    if (is_with_self_grav || is_with_ext_grav)
+      engine_make_hierarchical_tasks_gravity(e, c);
+  }
+}
+
+/**
+ * @brief Constructs the top-level tasks for the short-range gravity
+ * and long-range gravity interactions.
+ *
+ * - All top-cells get a self task.
+ * - All pairs of top-cells within range according to the multipole
+ *   acceptance criterion get a pair task.
+ *
+ * @param map_data Offset of the first top-level cell index to treat.
+ * @param num_elements Number of top-level cells to treat.
+ * @param extra_data Pointer to the #engine.
+ */
+void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
+                                           void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  struct space *s = e->s;
+  struct scheduler *sched = &e->sched;
+  const int nodeID = e->nodeID;
+  const int periodic = s->periodic;
+  const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
+  const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
+  struct cell *cells = s->cells_top;
+  const double theta_crit = e->gravity_properties->theta_crit;
+  const double max_distance = e->mesh->r_cut_max;
+  const double max_distance2 = max_distance * max_distance;
+
+  /* Compute how many cells away we need to walk */
+  /* NOTE(review): the 2.5 pre-factor is presumably a safety margin on top
+     of the opening-angle criterion theta_crit — confirm against the theory
+     documentation before changing it. */
+  const double distance = 2.5 * cells[0].width[0] / theta_crit;
+  int delta = (int)(distance / cells[0].width[0]) + 1;
+  int delta_m = delta; /* walk length in the negative direction */
+  int delta_p = delta; /* walk length in the positive direction */
+
+  /* Special case where every cell is in range of every other one */
+  if (delta >= cdim[0] / 2) {
+    if (cdim[0] % 2 == 0) {
+      /* Even grid: drop one cell on the + side so that, with periodic
+         wrapping, each neighbour is visited only once. */
+      delta_m = cdim[0] / 2;
+      delta_p = cdim[0] / 2 - 1;
+    } else {
+      delta_m = cdim[0] / 2;
+      delta_p = cdim[0] / 2;
+    }
+  }
+
+  /* Loop through the elements, which are just byte offsets from NULL. */
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    /* Get the cell index. */
+    const int cid = (size_t)(map_data) + ind;
+
+    /* Integer indices of the cell in the top-level grid */
+    const int i = cid / (cdim[1] * cdim[2]);
+    const int j = (cid / cdim[2]) % cdim[1];
+    const int k = cid % cdim[2];
+
+    /* Get the cell */
+    struct cell *ci = &cells[cid];
+
+    /* Skip cells without gravity particles */
+    if (ci->grav.count == 0) continue;
+
+    /* If the cell is local build a self-interaction */
+    if (ci->nodeID == nodeID) {
+      scheduler_addtask(sched, task_type_self, task_subtype_grav, 0, 0, ci,
+                        NULL);
+    }
+
+    /* Loop over every other cell within a cube of half-width delta
+       (Chebyshev distance in grid indices) around this one. */
+    for (int ii = -delta_m; ii <= delta_p; ii++) {
+      int iii = i + ii;
+      if (!periodic && (iii < 0 || iii >= cdim[0])) continue;
+      iii = (iii + cdim[0]) % cdim[0]; /* periodic wrap */
+      for (int jj = -delta_m; jj <= delta_p; jj++) {
+        int jjj = j + jj;
+        if (!periodic && (jjj < 0 || jjj >= cdim[1])) continue;
+        jjj = (jjj + cdim[1]) % cdim[1]; /* periodic wrap */
+        for (int kk = -delta_m; kk <= delta_p; kk++) {
+          int kkk = k + kk;
+          if (!periodic && (kkk < 0 || kkk >= cdim[2])) continue;
+          kkk = (kkk + cdim[2]) % cdim[2]; /* periodic wrap */
+
+          /* Get the cell */
+          const int cjd = cell_getid(cdim, iii, jjj, kkk);
+          struct cell *cj = &cells[cjd];
+
+          /* Avoid duplicates (cid >= cjd handles each unordered pair once),
+             empty cells and completely foreign pairs (neither cell local). */
+          if (cid >= cjd || cj->grav.count == 0 ||
+              (ci->nodeID != nodeID && cj->nodeID != nodeID))
+            continue;
+
+          /* Recover the multipole information */
+          const struct gravity_tensors *multi_i = ci->grav.multipole;
+          const struct gravity_tensors *multi_j = cj->grav.multipole;
+
+          if (multi_i == NULL && ci->nodeID != nodeID)
+            error("Multipole of ci was not exchanged properly via the proxies");
+          if (multi_j == NULL && cj->nodeID != nodeID)
+            error("Multipole of cj was not exchanged properly via the proxies");
+
+          /* Minimal distance between any pair of particles */
+          const double min_radius2 =
+              cell_min_dist2_same_size(ci, cj, periodic, dim);
+
+          /* Are we beyond the distance where the truncated forces are 0 ?*/
+          /* (Beyond r_cut_max the short-range force vanishes; the rest is
+             handled by the periodic mesh.) */
+          if (periodic && min_radius2 > max_distance2) continue;
+
+          /* Are the cells too close for a MM interaction ? */
+          if (!cell_can_use_pair_mm_rebuild(ci, cj, e, s)) {
+
+            /* Ok, we need to add a direct pair calculation */
+            scheduler_addtask(sched, task_type_pair, task_subtype_grav, 0, 0,
+                              ci, cj);
+
+#ifdef SWIFT_DEBUG_CHECKS
+#ifdef WITH_MPI
+
+            /* Let's cross-check that we had a proxy for that cell */
+            if (ci->nodeID == nodeID && cj->nodeID != engine_rank) {
+
+              /* Find the proxy for this node */
+              const int proxy_id = e->proxy_ind[cj->nodeID];
+              if (proxy_id < 0)
+                error("No proxy exists for that foreign node %d!", cj->nodeID);
+
+              const struct proxy *p = &e->proxies[proxy_id];
+
+              /* Check whether the cell exists in the proxy */
+              int n = 0;
+              for (; n < p->nr_cells_in; n++)
+                if (p->cells_in[n] == cj) {
+                  break;
+                }
+              if (n == p->nr_cells_in)
+                error(
+                    "Cell %d not found in the proxy but trying to construct "
+                    "grav task!",
+                    cjd);
+            } else if (cj->nodeID == nodeID && ci->nodeID != engine_rank) {
+
+              /* Find the proxy for this node */
+              const int proxy_id = e->proxy_ind[ci->nodeID];
+              if (proxy_id < 0)
+                error("No proxy exists for that foreign node %d!", ci->nodeID);
+
+              const struct proxy *p = &e->proxies[proxy_id];
+
+              /* Check whether the cell exists in the proxy */
+              int n = 0;
+              for (; n < p->nr_cells_in; n++)
+                if (p->cells_in[n] == ci) {
+                  break;
+                }
+              if (n == p->nr_cells_in)
+                error(
+                    "Cell %d not found in the proxy but trying to construct "
+                    "grav task!",
+                    cid);
+            }
+#endif /* WITH_MPI */
+#endif /* SWIFT_DEBUG_CHECKS */
+          }
+        }
+      }
+    }
+  }
+}
+
+/**
+ * @brief Constructs the top-level tasks for the external gravity.
+ *
+ * Every local top-level cell containing at least one gravity particle
+ * receives a self-interaction task with the external potential.
+ *
+ * @param e The #engine.
+ */
+void engine_make_external_gravity_tasks(struct engine *e) {
+
+  struct scheduler *sched = &e->sched;
+  struct space *s = e->s;
+  const int local_node = e->nodeID;
+
+  /* Run through all the top-level cells. */
+  for (int ind = 0; ind < s->nr_cells; ++ind) {
+
+    struct cell *c = &s->cells_top[ind];
+
+    /* Nothing to do for cells without gravity particles... */
+    if (c->grav.count == 0) continue;
+
+    /* ...or for cells that live on another rank. */
+    if (c->nodeID != local_node) continue;
+
+    /* Local and non-empty: give it an external-gravity self task. */
+    scheduler_addtask(sched, task_type_self, task_subtype_external_grav, 0, 0,
+                      c, NULL);
+  }
+}
+
+/**
+ * @brief Counts the tasks associated with one cell and constructs the links
+ *
+ * For each hydrodynamic and gravity task, construct the links with
+ * the corresponding cell.  Similarly, construct the dependencies for
+ * all the sorting tasks.
+ *
+ * @param map_data Pointer to an array of #task.
+ * @param num_elements Number of tasks in the array.
+ * @param extra_data Pointer to the #engine.
+ */
+void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
+                                        void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  struct scheduler *const sched = &e->sched;
+
+  for (int ind = 0; ind < num_elements; ind++) {
+    struct task *t = &((struct task *)map_data)[ind];
+
+    /* Cache the cells and type/subtype; use these consistently below. */
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+    const enum task_types t_type = t->type;
+    const enum task_subtypes t_subtype = t->subtype;
+
+    /* Link sort tasks to all the higher sort task. */
+    if (t_type == task_type_sort) {
+      for (struct cell *finger = ci->parent; finger != NULL;
+           finger = finger->parent)
+        if (finger->hydro.sorts != NULL)
+          scheduler_addunlock(sched, t, finger->hydro.sorts);
+    }
+
+    /* Link stars sort tasks to all the higher sort task. */
+    if (t_type == task_type_stars_sort) {
+      for (struct cell *finger = ci->parent; finger != NULL;
+           finger = finger->parent) {
+        if (finger->stars.sorts != NULL)
+          scheduler_addunlock(sched, t, finger->stars.sorts);
+      }
+    }
+
+    /* Link self tasks to cells. */
+    else if (t_type == task_type_self) {
+      atomic_inc(&ci->nr_tasks);
+
+      if (t_subtype == task_subtype_density) {
+        engine_addlink(e, &ci->hydro.density, t);
+      } else if (t_subtype == task_subtype_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+      } else if (t_subtype == task_subtype_external_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+      } else if (t_subtype == task_subtype_stars_density) {
+        engine_addlink(e, &ci->stars.density, t);
+      } else if (t_subtype == task_subtype_stars_feedback) {
+        engine_addlink(e, &ci->stars.feedback, t);
+      }
+
+      /* Link pair tasks to cells. */
+    } else if (t_type == task_type_pair) {
+      atomic_inc(&ci->nr_tasks);
+      atomic_inc(&cj->nr_tasks);
+
+      if (t_subtype == task_subtype_density) {
+        engine_addlink(e, &ci->hydro.density, t);
+        engine_addlink(e, &cj->hydro.density, t);
+      } else if (t_subtype == task_subtype_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+        engine_addlink(e, &cj->grav.grav, t);
+      } else if (t_subtype == task_subtype_stars_density) {
+        engine_addlink(e, &ci->stars.density, t);
+        engine_addlink(e, &cj->stars.density, t);
+      } else if (t_subtype == task_subtype_stars_feedback) {
+        engine_addlink(e, &ci->stars.feedback, t);
+        engine_addlink(e, &cj->stars.feedback, t);
+      }
+#ifdef SWIFT_DEBUG_CHECKS
+      else if (t_subtype == task_subtype_external_grav) {
+        error("Found a pair/external-gravity task...");
+      }
+#endif
+
+      /* Link sub-self tasks to cells. */
+    } else if (t_type == task_type_sub_self) {
+      atomic_inc(&ci->nr_tasks);
+
+      if (t_subtype == task_subtype_density) {
+        engine_addlink(e, &ci->hydro.density, t);
+      } else if (t_subtype == task_subtype_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+      } else if (t_subtype == task_subtype_external_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+      } else if (t_subtype == task_subtype_stars_density) {
+        engine_addlink(e, &ci->stars.density, t);
+      } else if (t_subtype == task_subtype_stars_feedback) {
+        engine_addlink(e, &ci->stars.feedback, t);
+      }
+
+      /* Link sub-pair tasks to cells. */
+    } else if (t_type == task_type_sub_pair) {
+      atomic_inc(&ci->nr_tasks);
+      atomic_inc(&cj->nr_tasks);
+
+      if (t_subtype == task_subtype_density) {
+        engine_addlink(e, &ci->hydro.density, t);
+        engine_addlink(e, &cj->hydro.density, t);
+      } else if (t_subtype == task_subtype_grav) {
+        engine_addlink(e, &ci->grav.grav, t);
+        engine_addlink(e, &cj->grav.grav, t);
+      } else if (t_subtype == task_subtype_stars_density) {
+        engine_addlink(e, &ci->stars.density, t);
+        engine_addlink(e, &cj->stars.density, t);
+      } else if (t_subtype == task_subtype_stars_feedback) {
+        engine_addlink(e, &ci->stars.feedback, t);
+        engine_addlink(e, &cj->stars.feedback, t);
+      }
+#ifdef SWIFT_DEBUG_CHECKS
+      else if (t_subtype == task_subtype_external_grav) {
+        error("Found a sub-pair/external-gravity task...");
+      }
+#endif
+
+      /* Multipole-multipole interaction of progenies */
+    } else if (t_type == task_type_grav_mm) {
+
+      atomic_inc(&ci->grav.nr_mm_tasks);
+      atomic_inc(&cj->grav.nr_mm_tasks);
+      engine_addlink(e, &ci->grav.mm, t);
+      engine_addlink(e, &cj->grav.mm, t);
+    }
+  }
+}
+
+/**
+ * @brief Creates all the task dependencies for the gravity
+ *
+ * Runs through every task in the scheduler and, for each gravity
+ * interaction task (self, pair, sub-self, sub-pair, M-M), hooks it in
+ * between the init/drift tasks above it and the grav_down task below it.
+ *
+ * @param e The #engine
+ */
+void engine_link_gravity_tasks(struct engine *e) {
+
+  struct scheduler *sched = &e->sched;
+  const int nodeID = e->nodeID;
+  const int nr_tasks = sched->nr_tasks;
+
+  /* Run through all the tasks currently in the scheduler. */
+  for (int k = 0; k < nr_tasks; k++) {
+
+    /* Get a pointer to the task. */
+    struct task *t = &sched->tasks[k];
+
+    /* Skip deleted/empty slots. */
+    if (t->type == task_type_none) continue;
+
+    /* Get the cells we act on */
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+    const enum task_types t_type = t->type;
+    const enum task_subtypes t_subtype = t->subtype;
+
+    /* Pointers to the parent cells for tasks going up and down the tree
+     * In the case where we are at the super-level we don't
+     * want the parent as no tasks are defined above that level. */
+    struct cell *ci_parent, *cj_parent;
+    if (ci->parent != NULL && ci->grav.super != ci)
+      ci_parent = ci->parent;
+    else
+      ci_parent = ci;
+
+    if (cj != NULL && cj->parent != NULL && cj->grav.super != cj)
+      cj_parent = cj->parent;
+    else
+      cj_parent = cj; /* stays NULL for self-type tasks (cj == NULL) */
+
+/* Node ID (if running with MPI) */
+#ifdef WITH_MPI
+    const int ci_nodeID = ci->nodeID;
+    const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+    /* Without MPI everything is local by construction. */
+    const int ci_nodeID = nodeID;
+    const int cj_nodeID = nodeID;
+#endif
+
+    /* Self-interaction for self-gravity? */
+    /* (Note: this is a plain 'if' while the later branches use 'else if';
+     *  this is safe because the type/subtype combinations are mutually
+     *  exclusive, so at most one branch can fire per task.) */
+    if (t_type == task_type_self && t_subtype == task_subtype_grav) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (ci_nodeID != nodeID) error("Non-local self task");
+#endif
+
+      /* drift ---+-> gravity --> grav_down */
+      /* init  --/    */
+      scheduler_addunlock(sched, ci_parent->grav.drift_out, t);
+      scheduler_addunlock(sched, ci_parent->grav.init_out, t);
+      scheduler_addunlock(sched, t, ci_parent->grav.down_in);
+    }
+
+    /* Self-interaction for external gravity ? */
+    if (t_type == task_type_self && t_subtype == task_subtype_external_grav) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (ci_nodeID != nodeID) error("Non-local self task");
+#endif
+
+      /* drift -----> gravity --> end_gravity_force */
+      scheduler_addunlock(sched, ci->grav.super->grav.drift, t);
+      scheduler_addunlock(sched, t, ci->grav.super->grav.end_force);
+    }
+
+    /* Otherwise, pair interaction? */
+    else if (t_type == task_type_pair && t_subtype == task_subtype_grav) {
+
+      if (ci_nodeID == nodeID) {
+
+        /* drift ---+-> gravity --> grav_down */
+        /* init  --/    */
+        scheduler_addunlock(sched, ci_parent->grav.drift_out, t);
+        scheduler_addunlock(sched, ci_parent->grav.init_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav.down_in);
+      }
+      if (cj_nodeID == nodeID) {
+
+        /* drift ---+-> gravity --> grav_down */
+        /* init  --/    */
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->grav.drift_out, t);
+          scheduler_addunlock(sched, cj_parent->grav.init_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav.down_in);
+        }
+      }
+    }
+
+    /* Otherwise, sub-self interaction? */
+    else if (t_type == task_type_sub_self && t_subtype == task_subtype_grav) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (ci_nodeID != nodeID) error("Non-local sub-self task");
+#endif
+      /* drift ---+-> gravity --> grav_down */
+      /* init  --/    */
+      scheduler_addunlock(sched, ci_parent->grav.drift_out, t);
+      scheduler_addunlock(sched, ci_parent->grav.init_out, t);
+      scheduler_addunlock(sched, t, ci_parent->grav.down_in);
+    }
+
+    /* Sub-self-interaction for external gravity ? */
+    else if (t_type == task_type_sub_self &&
+             t_subtype == task_subtype_external_grav) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (ci_nodeID != nodeID) error("Non-local sub-self task");
+#endif
+
+      /* drift -----> gravity --> end_force */
+      scheduler_addunlock(sched, ci->grav.super->grav.drift, t);
+      scheduler_addunlock(sched, t, ci->grav.super->grav.end_force);
+    }
+
+    /* Otherwise, sub-pair interaction? */
+    else if (t_type == task_type_sub_pair && t_subtype == task_subtype_grav) {
+
+      if (ci_nodeID == nodeID) {
+
+        /* drift ---+-> gravity --> grav_down */
+        /* init  --/    */
+        scheduler_addunlock(sched, ci_parent->grav.drift_out, t);
+        scheduler_addunlock(sched, ci_parent->grav.init_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav.down_in);
+      }
+      if (cj_nodeID == nodeID) {
+
+        /* drift ---+-> gravity --> grav_down */
+        /* init  --/    */
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->grav.drift_out, t);
+          scheduler_addunlock(sched, cj_parent->grav.init_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav.down_in);
+        }
+      }
+    }
+
+    /* Otherwise M-M interaction? */
+    /* (No drift dependency here: M-M interactions only use the multipoles,
+     *  not the particle positions — the init -> ... -> down chain suffices.) */
+    else if (t_type == task_type_grav_mm) {
+
+      if (ci_nodeID == nodeID) {
+
+        /* init -----> gravity --> grav_down */
+        scheduler_addunlock(sched, ci_parent->grav.init_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav.down_in);
+      }
+      if (cj_nodeID == nodeID) {
+
+        /* init -----> gravity --> grav_down */
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->grav.init_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav.down_in);
+        }
+      }
+    }
+  }
+}
+
+#ifdef EXTRA_HYDRO_LOOP
+
+/**
+ * @brief Creates the dependency network for the hydro tasks of a given cell.
+ *
+ * Chains: density loop --> ghost --> gradient loop --> extra_ghost
+ *         --> force loop.
+ *
+ * @param sched The #scheduler.
+ * @param density The density task to link.
+ * @param gradient The gradient task to link.
+ * @param force The force task to link.
+ * @param limiter The limiter task to link.
+ * @param c The cell.
+ * @param with_cooling Do we have a cooling task ?
+ * @param with_limiter Do we have a time-step limiter ?
+ *
+ * NOTE(review): limiter, with_cooling and with_limiter are unused in this
+ * function body; the corresponding dependencies appear to be created by the
+ * callers — confirm before removing them from the signature.
+ */
+static inline void engine_make_hydro_loops_dependencies(
+    struct scheduler *sched, struct task *density, struct task *gradient,
+    struct task *force, struct task *limiter, struct cell *c, int with_cooling,
+    int with_limiter) {
+
+  /* density loop --> ghost --> gradient loop --> extra_ghost */
+  /* extra_ghost --> force loop  */
+  scheduler_addunlock(sched, density, c->hydro.super->hydro.ghost_in);
+  scheduler_addunlock(sched, c->hydro.super->hydro.ghost_out, gradient);
+  scheduler_addunlock(sched, gradient, c->hydro.super->hydro.extra_ghost);
+  scheduler_addunlock(sched, c->hydro.super->hydro.extra_ghost, force);
+}
+
+#else
+
+/**
+ * @brief Creates the dependency network for the hydro tasks of a given cell.
+ *
+ * Chains: density loop --> ghost --> force loop.
+ *
+ * @param sched The #scheduler.
+ * @param density The density task to link.
+ * @param force The force task to link.
+ * @param limiter The limiter task to link.
+ * @param c The cell.
+ * @param with_cooling Are we running with cooling switched on?
+ * @param with_limiter Are we running with limiter switched on?
+ *
+ * NOTE(review): limiter, with_cooling and with_limiter are unused in this
+ * function body; the corresponding dependencies appear to be created by the
+ * callers — confirm before removing them from the signature.
+ */
+static inline void engine_make_hydro_loops_dependencies(
+    struct scheduler *sched, struct task *density, struct task *force,
+    struct task *limiter, struct cell *c, int with_cooling, int with_limiter) {
+
+  /* density loop --> ghost --> force loop */
+  scheduler_addunlock(sched, density, c->hydro.super->hydro.ghost_in);
+  scheduler_addunlock(sched, c->hydro.super->hydro.ghost_out, force);
+}
+
+#endif
+
+/**
+ * @brief Duplicates the first hydro loop and construct all the
+ * dependencies for the hydro part
+ *
+ * This is done by looping over all the previously constructed tasks
+ * and adding another task involving the same cells but this time
+ * corresponding to the second hydro loop over neighbours.
+ * With all the relevant tasks for a given cell available, we construct
+ * all the dependencies for that cell.
+ */
+void engine_make_extra_hydroloop_tasks_mapper(void *map_data, int num_elements,
+                                              void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  struct scheduler *sched = &e->sched;
+  const int nodeID = e->nodeID;
+  const int with_cooling = (e->policy & engine_policy_cooling);
+  const int with_limiter = (e->policy & engine_policy_limiter);
+  const int with_feedback = (e->policy & engine_policy_feedback);
+#ifdef EXTRA_HYDRO_LOOP
+  struct task *t_gradient = NULL;
+#endif
+  struct task *t_force = NULL;
+  struct task *t_limiter = NULL;
+  struct task *t_star_density = NULL;
+  struct task *t_star_feedback = NULL;
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    struct task *t = &((struct task *)map_data)[ind];
+    const enum task_types t_type = t->type;
+    const enum task_subtypes t_subtype = t->subtype;
+    const long long flags = t->flags;
+    struct cell *ci = t->ci;
+    struct cell *cj = t->cj;
+
+    /* Sort tasks depend on the drift of the cell (gas version). */
+    if (t_type == task_type_sort && ci->nodeID == nodeID) {
+      scheduler_addunlock(sched, ci->hydro.super->hydro.drift, t);
+    }
+
+    /* Sort tasks depend on the drift of the cell (stars version). */
+    else if (t_type == task_type_stars_sort && ci->nodeID == nodeID) {
+      scheduler_addunlock(sched, ci->hydro.super->stars.drift, t);
+    }
+
+    /* Self-interaction? */
+    else if (t_type == task_type_self && t_subtype == task_subtype_density) {
+
+      /* Make the self-density tasks depend on the drift only. */
+      scheduler_addunlock(sched, ci->hydro.super->hydro.drift, t);
+
+      /* Task for the second hydro loop, */
+      t_force = scheduler_addtask(sched, task_type_self, task_subtype_force,
+                                  flags, 0, ci, NULL);
+
+      /* the task for the time-step limiter */
+      if (with_limiter) {
+        t_limiter = scheduler_addtask(sched, task_type_self,
+                                      task_subtype_limiter, flags, 0, ci, NULL);
+      }
+
+      /* The stellar feedback tasks */
+      if (with_feedback) {
+        t_star_density =
+            scheduler_addtask(sched, task_type_self, task_subtype_stars_density,
+                              flags, 0, ci, NULL);
+        t_star_feedback =
+            scheduler_addtask(sched, task_type_self,
+                              task_subtype_stars_feedback, flags, 0, ci, NULL);
+      }
+
+      /* Link the tasks to the cells */
+      engine_addlink(e, &ci->hydro.force, t_force);
+      if (with_limiter) {
+        engine_addlink(e, &ci->hydro.limiter, t_limiter);
+      }
+      if (with_feedback) {
+        engine_addlink(e, &ci->stars.density, t_star_density);
+        engine_addlink(e, &ci->stars.feedback, t_star_feedback);
+      }
+
+#ifdef EXTRA_HYDRO_LOOP
+
+      /* Same work for the additional hydro loop */
+      t_gradient = scheduler_addtask(sched, task_type_self,
+                                     task_subtype_gradient, flags, 0, ci, NULL);
+
+      /* Add the link between the new loops and the cell */
+      engine_addlink(e, &ci->hydro.gradient, t_gradient);
+
+      /* Now, build all the dependencies for the hydro */
+      engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                           t_limiter, ci, with_cooling,
+                                           with_limiter);
+#else
+
+      /* Now, build all the dependencies for the hydro */
+      engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, ci,
+                                           with_cooling, with_limiter);
+#endif
+
+      /* Create the task dependencies */
+      scheduler_addunlock(sched, t_force, ci->hydro.super->hydro.end_force);
+
+      if (with_feedback) {
+
+        scheduler_addunlock(sched, ci->hydro.super->stars.drift,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->hydro.drift,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->stars.stars_in,
+                            t_star_density);
+        scheduler_addunlock(sched, t_star_density,
+                            ci->hydro.super->stars.ghost);
+        scheduler_addunlock(sched, ci->hydro.super->stars.ghost,
+                            t_star_feedback);
+        scheduler_addunlock(sched, t_star_feedback,
+                            ci->hydro.super->stars.stars_out);
+      }
+
+      if (with_limiter) {
+        scheduler_addunlock(sched, ci->super->kick2, t_limiter);
+        scheduler_addunlock(sched, t_limiter, ci->super->timestep);
+        scheduler_addunlock(sched, t_limiter, ci->super->timestep_limiter);
+      }
+    }
+
+    /* Otherwise, pair interaction? */
+    else if (t_type == task_type_pair && t_subtype == task_subtype_density) {
+
+      /* Make all density tasks depend on the drift */
+      if (ci->nodeID == nodeID) {
+        scheduler_addunlock(sched, ci->hydro.super->hydro.drift, t);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        scheduler_addunlock(sched, cj->hydro.super->hydro.drift, t);
+      }
+
+      /* Make all density tasks depend on the sorts */
+      scheduler_addunlock(sched, ci->hydro.super->hydro.sorts, t);
+      if (ci->hydro.super != cj->hydro.super) {
+        scheduler_addunlock(sched, cj->hydro.super->hydro.sorts, t);
+      }
+
+      /* New task for the force */
+      t_force = scheduler_addtask(sched, task_type_pair, task_subtype_force,
+                                  flags, 0, ci, cj);
+
+      /* and the task for the time-step limiter */
+      if (with_limiter) {
+        t_limiter = scheduler_addtask(sched, task_type_pair,
+                                      task_subtype_limiter, flags, 0, ci, cj);
+      }
+
+      /* The stellar feedback tasks */
+      if (with_feedback) {
+        t_star_density =
+            scheduler_addtask(sched, task_type_pair, task_subtype_stars_density,
+                              flags, 0, ci, cj);
+        t_star_feedback =
+            scheduler_addtask(sched, task_type_pair,
+                              task_subtype_stars_feedback, flags, 0, ci, cj);
+      }
+
+      engine_addlink(e, &ci->hydro.force, t_force);
+      engine_addlink(e, &cj->hydro.force, t_force);
+      if (with_limiter) {
+        engine_addlink(e, &ci->hydro.limiter, t_limiter);
+        engine_addlink(e, &cj->hydro.limiter, t_limiter);
+      }
+      if (with_feedback) {
+        engine_addlink(e, &ci->stars.density, t_star_density);
+        engine_addlink(e, &cj->stars.density, t_star_density);
+        engine_addlink(e, &ci->stars.feedback, t_star_feedback);
+        engine_addlink(e, &cj->stars.feedback, t_star_feedback);
+      }
+
+#ifdef EXTRA_HYDRO_LOOP
+
+      /* Start by constructing the task for the second and third hydro loop */
+      t_gradient = scheduler_addtask(sched, task_type_pair,
+                                     task_subtype_gradient, flags, 0, ci, cj);
+
+      /* Add the link between the new loop and both cells */
+      engine_addlink(e, &ci->hydro.gradient, t_gradient);
+      engine_addlink(e, &cj->hydro.gradient, t_gradient);
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      if (ci->nodeID == nodeID) {
+        engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                             t_limiter, ci, with_cooling,
+                                             with_limiter);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                             t_limiter, cj, with_cooling,
+                                             with_limiter);
+      }
+#else
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      if (ci->nodeID == nodeID) {
+        engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, ci,
+                                             with_cooling, with_limiter);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, cj,
+                                             with_cooling, with_limiter);
+      }
+#endif
+
+      if (with_feedback) {
+        scheduler_addunlock(sched, ci->hydro.super->hydro.sorts,
+                            t_star_density);
+
+        if (ci->hydro.super != cj->hydro.super) {
+          scheduler_addunlock(sched, cj->hydro.super->hydro.sorts,
+                              t_star_density);
+        }
+      }
+
+      if (ci->nodeID == nodeID) {
+        scheduler_addunlock(sched, t_force, ci->hydro.super->hydro.end_force);
+
+        if (with_feedback) {
+
+          scheduler_addunlock(sched, ci->hydro.super->stars.drift,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->stars.sorts,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->hydro.drift,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->stars.stars_in,
+                              t_star_density);
+          scheduler_addunlock(sched, t_star_density,
+                              ci->hydro.super->stars.ghost);
+          scheduler_addunlock(sched, ci->hydro.super->stars.ghost,
+                              t_star_feedback);
+          scheduler_addunlock(sched, t_star_feedback,
+                              ci->hydro.super->stars.stars_out);
+        }
+
+        if (with_limiter) {
+          scheduler_addunlock(sched, ci->super->kick2, t_limiter);
+          scheduler_addunlock(sched, t_limiter, ci->super->timestep);
+          scheduler_addunlock(sched, t_limiter, ci->super->timestep_limiter);
+        }
+      } else /*(ci->nodeID != nodeID) */ {
+        if (with_feedback) {
+          scheduler_addunlock(sched, ci->hydro.super->stars.sorts,
+                              t_star_feedback);
+        }
+      }
+
+      if (cj->nodeID == nodeID) {
+
+        if (ci->hydro.super != cj->hydro.super) {
+
+          scheduler_addunlock(sched, t_force, cj->hydro.super->hydro.end_force);
+
+          if (with_feedback) {
+
+            scheduler_addunlock(sched, cj->hydro.super->stars.sorts,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->stars.drift,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->hydro.drift,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->stars.stars_in,
+                                t_star_density);
+            scheduler_addunlock(sched, t_star_density,
+                                cj->hydro.super->stars.ghost);
+            scheduler_addunlock(sched, cj->hydro.super->stars.ghost,
+                                t_star_feedback);
+            scheduler_addunlock(sched, t_star_feedback,
+                                cj->hydro.super->stars.stars_out);
+          }
+
+          if (with_limiter) {
+            scheduler_addunlock(sched, cj->super->kick2, t_limiter);
+            scheduler_addunlock(sched, t_limiter, cj->super->timestep);
+            scheduler_addunlock(sched, t_limiter, cj->super->timestep_limiter);
+          }
+        }
+      } else /*(cj->nodeID != nodeID) */ {
+        if (with_feedback) {
+          scheduler_addunlock(sched, cj->hydro.super->stars.sorts,
+                              t_star_feedback);
+        }
+      }
+    }
+
+    /* Otherwise, sub-self interaction? */
+    else if (t_type == task_type_sub_self &&
+             t_subtype == task_subtype_density) {
+
+      /* Make all density tasks depend on the drift and sorts. */
+      scheduler_addunlock(sched, ci->hydro.super->hydro.drift, t);
+      scheduler_addunlock(sched, ci->hydro.super->hydro.sorts, t);
+
+      /* Start by constructing the task for the second hydro loop */
+      t_force = scheduler_addtask(sched, task_type_sub_self, task_subtype_force,
+                                  flags, 0, ci, NULL);
+
+      /* and the task for the time-step limiter */
+      if (with_limiter) {
+        t_limiter = scheduler_addtask(sched, task_type_sub_self,
+                                      task_subtype_limiter, flags, 0, ci, NULL);
+      }
+
+      /* The stellar feedback tasks */
+      if (with_feedback) {
+        t_star_density =
+            scheduler_addtask(sched, task_type_sub_self,
+                              task_subtype_stars_density, flags, 0, ci, NULL);
+        t_star_feedback =
+            scheduler_addtask(sched, task_type_sub_self,
+                              task_subtype_stars_feedback, flags, 0, ci, NULL);
+      }
+
+      /* Add the link between the new loop and the cell */
+      engine_addlink(e, &ci->hydro.force, t_force);
+      if (with_limiter) {
+        engine_addlink(e, &ci->hydro.limiter, t_limiter);
+      }
+      if (with_feedback) {
+        engine_addlink(e, &ci->stars.density, t_star_density);
+        engine_addlink(e, &ci->stars.feedback, t_star_feedback);
+      }
+
+#ifdef EXTRA_HYDRO_LOOP
+
+      /* Start by constructing the task for the second and third hydro loop */
+      t_gradient = scheduler_addtask(sched, task_type_sub_self,
+                                     task_subtype_gradient, flags, 0, ci, NULL);
+
+      /* Add the link between the new loop and the cell */
+      engine_addlink(e, &ci->hydro.gradient, t_gradient);
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                           t_limiter, ci, with_cooling,
+                                           with_limiter);
+#else
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, ci,
+                                           with_cooling, with_limiter);
+#endif
+
+      /* Create the task dependencies */
+      scheduler_addunlock(sched, t_force, ci->hydro.super->hydro.end_force);
+
+      if (with_feedback) {
+
+        scheduler_addunlock(sched, ci->hydro.super->stars.drift,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->stars.sorts,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->hydro.drift,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->hydro.sorts,
+                            t_star_density);
+        scheduler_addunlock(sched, ci->hydro.super->stars.stars_in,
+                            t_star_density);
+        scheduler_addunlock(sched, t_star_density,
+                            ci->hydro.super->stars.ghost);
+        scheduler_addunlock(sched, ci->hydro.super->stars.ghost,
+                            t_star_feedback);
+        scheduler_addunlock(sched, t_star_feedback,
+                            ci->hydro.super->stars.stars_out);
+      }
+
+      if (with_limiter) {
+        scheduler_addunlock(sched, ci->super->kick2, t_limiter);
+        scheduler_addunlock(sched, t_limiter, ci->super->timestep);
+        scheduler_addunlock(sched, t_limiter, ci->super->timestep_limiter);
+      }
+
+    }
+
+    /* Otherwise, sub-pair interaction? */
+    else if (t_type == task_type_sub_pair &&
+             t_subtype == task_subtype_density) {
+
+      /* Make all density tasks depend on the drift */
+      if (ci->nodeID == nodeID) {
+        scheduler_addunlock(sched, ci->hydro.super->hydro.drift, t);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        scheduler_addunlock(sched, cj->hydro.super->hydro.drift, t);
+      }
+
+      /* Make all density tasks depend on the sorts */
+      scheduler_addunlock(sched, ci->hydro.super->hydro.sorts, t);
+      if (ci->hydro.super != cj->hydro.super) {
+        scheduler_addunlock(sched, cj->hydro.super->hydro.sorts, t);
+      }
+
+      /* New task for the force */
+      t_force = scheduler_addtask(sched, task_type_sub_pair, task_subtype_force,
+                                  flags, 0, ci, cj);
+
+      /* and the task for the time-step limiter */
+      if (with_limiter) {
+        t_limiter = scheduler_addtask(sched, task_type_sub_pair,
+                                      task_subtype_limiter, flags, 0, ci, cj);
+      }
+
+      /* The stellar feedback tasks */
+      if (with_feedback) {
+        t_star_density =
+            scheduler_addtask(sched, task_type_sub_pair,
+                              task_subtype_stars_density, flags, 0, ci, cj);
+        t_star_feedback =
+            scheduler_addtask(sched, task_type_sub_pair,
+                              task_subtype_stars_feedback, flags, 0, ci, cj);
+      }
+
+      engine_addlink(e, &ci->hydro.force, t_force);
+      engine_addlink(e, &cj->hydro.force, t_force);
+      if (with_limiter) {
+        engine_addlink(e, &ci->hydro.limiter, t_limiter);
+        engine_addlink(e, &cj->hydro.limiter, t_limiter);
+      }
+      if (with_feedback) {
+        engine_addlink(e, &ci->stars.density, t_star_density);
+        engine_addlink(e, &cj->stars.density, t_star_density);
+        engine_addlink(e, &ci->stars.feedback, t_star_feedback);
+        engine_addlink(e, &cj->stars.feedback, t_star_feedback);
+      }
+
+#ifdef EXTRA_HYDRO_LOOP
+
+      /* Start by constructing the task for the second and third hydro loop */
+      t_gradient = scheduler_addtask(sched, task_type_sub_pair,
+                                     task_subtype_gradient, flags, 0, ci, cj);
+
+      /* Add the link between the new loop and both cells */
+      engine_addlink(e, &ci->hydro.gradient, t_gradient);
+      engine_addlink(e, &cj->hydro.gradient, t_gradient);
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      if (ci->nodeID == nodeID) {
+        engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                             t_limiter, ci, with_cooling,
+                                             with_limiter);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        engine_make_hydro_loops_dependencies(sched, t, t_gradient, t_force,
+                                             t_limiter, cj, with_cooling,
+                                             with_limiter);
+      }
+#else
+
+      /* Now, build all the dependencies for the hydro for the cells */
+      /* that are local and are not descendant of the same super_hydro-cells */
+      if (ci->nodeID == nodeID) {
+        engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, ci,
+                                             with_cooling, with_limiter);
+      }
+      if ((cj->nodeID == nodeID) && (ci->hydro.super != cj->hydro.super)) {
+        engine_make_hydro_loops_dependencies(sched, t, t_force, t_limiter, cj,
+                                             with_cooling, with_limiter);
+      }
+#endif
+
+      if (with_feedback) {
+        scheduler_addunlock(sched, ci->hydro.super->hydro.sorts,
+                            t_star_density);
+        if (ci->hydro.super != cj->hydro.super) {
+          scheduler_addunlock(sched, cj->hydro.super->hydro.sorts,
+                              t_star_density);
+        }
+      }
+
+      if (ci->nodeID == nodeID) {
+        scheduler_addunlock(sched, t_force, ci->hydro.super->hydro.end_force);
+
+        if (with_feedback) {
+
+          scheduler_addunlock(sched, ci->hydro.super->stars.sorts,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->stars.drift,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->hydro.drift,
+                              t_star_density);
+          scheduler_addunlock(sched, ci->hydro.super->stars.stars_in,
+                              t_star_density);
+          scheduler_addunlock(sched, t_star_density,
+                              ci->hydro.super->stars.ghost);
+          scheduler_addunlock(sched, ci->hydro.super->stars.ghost,
+                              t_star_feedback);
+          scheduler_addunlock(sched, t_star_feedback,
+                              ci->hydro.super->stars.stars_out);
+        }
+
+        if (with_limiter) {
+          scheduler_addunlock(sched, ci->super->kick2, t_limiter);
+          scheduler_addunlock(sched, t_limiter, ci->super->timestep);
+          scheduler_addunlock(sched, t_limiter, ci->super->timestep_limiter);
+        }
+      } else /* ci->nodeID != nodeID */ {
+
+        if (with_feedback) {
+          /* message("%p/%p",ci->hydro.super->stars.sorts, t_star_feedback); */
+          scheduler_addunlock(sched, ci->hydro.super->stars.sorts,
+                              t_star_feedback);
+        }
+      }
+
+      if (cj->nodeID == nodeID) {
+
+        if (ci->hydro.super != cj->hydro.super) {
+
+          scheduler_addunlock(sched, t_force, cj->hydro.super->hydro.end_force);
+
+          if (with_feedback) {
+
+            scheduler_addunlock(sched, cj->hydro.super->stars.sorts,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->stars.drift,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->hydro.drift,
+                                t_star_density);
+            scheduler_addunlock(sched, cj->hydro.super->stars.stars_in,
+                                t_star_density);
+            scheduler_addunlock(sched, t_star_density,
+                                cj->hydro.super->stars.ghost);
+            scheduler_addunlock(sched, cj->hydro.super->stars.ghost,
+                                t_star_feedback);
+            scheduler_addunlock(sched, t_star_feedback,
+                                cj->hydro.super->stars.stars_out);
+          }
+
+          if (with_limiter) {
+            scheduler_addunlock(sched, cj->super->kick2, t_limiter);
+            scheduler_addunlock(sched, t_limiter, cj->super->timestep);
+            scheduler_addunlock(sched, t_limiter, cj->super->timestep_limiter);
+          }
+        }
+      } else /* cj->nodeID != nodeID */ {
+        if (with_feedback) {
+          scheduler_addunlock(sched, cj->hydro.super->stars.sorts,
+                              t_star_feedback);
+        }
+      }
+    }
+  }
+}
+/**
+ * @brief Constructs the top-level pair tasks for the first hydro loop over
+ * neighbours
+ *
+ * Here we construct all the tasks for all possible neighbouring non-empty
+ * local cells in the hierarchy. No dependencies are being added thus far.
+ * Additional loop over neighbours can later be added by simply duplicating
+ * all the tasks created by this function.
+ *
+ * @param map_data Offset of the first cell index, disguised as a pointer.
+ * @param num_elements Number of cells to traverse.
+ * @param extra_data The #engine.
+ */
+void engine_make_hydroloop_tasks_mapper(void *map_data, int num_elements,
+                                        void *extra_data) {
+
+  /* Extract the engine pointer. */
+  struct engine *e = (struct engine *)extra_data;
+  const int periodic = e->s->periodic;
+  const int with_feedback = (e->policy & engine_policy_feedback);
+
+  struct space *s = e->s;
+  struct scheduler *sched = &e->sched;
+  const int nodeID = e->nodeID;
+  const int *cdim = s->cdim;
+  struct cell *cells = s->cells_top;
+
+  /* Loop through the elements, which are just byte offsets from NULL. */
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    /* Get the cell index. */
+    const int cid = (size_t)(map_data) + ind;
+
+    /* Integer indices of the cell in the top-level grid */
+    const int i = cid / (cdim[1] * cdim[2]);
+    const int j = (cid / cdim[2]) % cdim[1];
+    const int k = cid % cdim[2];
+
+    /* Get the cell */
+    struct cell *ci = &cells[cid];
+
+    /* Skip cells without hydro or star particles */
+    if ((ci->hydro.count == 0) && (!with_feedback || ci->stars.count == 0))
+      continue;
+
+    /* If the cell is local build a self-interaction */
+    if (ci->nodeID == nodeID) {
+      scheduler_addtask(sched, task_type_self, task_subtype_density, 0, 0, ci,
+                        NULL);
+    }
+
+    /* Now loop over all the neighbours of this cell */
+    for (int ii = -1; ii < 2; ii++) {
+      int iii = i + ii;
+      if (!periodic && (iii < 0 || iii >= cdim[0])) continue;
+      /* Wrap the neighbour index for periodic boundary conditions. */
+      iii = (iii + cdim[0]) % cdim[0];
+      for (int jj = -1; jj < 2; jj++) {
+        int jjj = j + jj;
+        if (!periodic && (jjj < 0 || jjj >= cdim[1])) continue;
+        jjj = (jjj + cdim[1]) % cdim[1];
+        for (int kk = -1; kk < 2; kk++) {
+          int kkk = k + kk;
+          if (!periodic && (kkk < 0 || kkk >= cdim[2])) continue;
+          kkk = (kkk + cdim[2]) % cdim[2];
+
+          /* Get the neighbouring cell */
+          const int cjd = cell_getid(cdim, iii, jjj, kkk);
+          struct cell *cj = &cells[cjd];
+
+          /* Is that neighbour local and does it have gas or star particles ?
+           * The (cid >= cjd) test ensures each unordered pair is constructed
+           * exactly once, since the outer loop visits both orderings. */
+          if ((cid >= cjd) ||
+              ((cj->hydro.count == 0) &&
+               (!with_feedback || cj->stars.count == 0)) ||
+              (ci->nodeID != nodeID && cj->nodeID != nodeID))
+            continue;
+
+          /* Construct the pair task. sortlistID maps the (ii,jj,kk)
+           * neighbour offset to the pair direction (sort) ID -- presumably
+           * provided by the sorting module; TODO confirm. */
+          const int sid = sortlistID[(kk + 1) + 3 * ((jj + 1) + 3 * (ii + 1))];
+          scheduler_addtask(sched, task_type_pair, task_subtype_density, sid, 0,
+                            ci, cj);
+
+#ifdef SWIFT_DEBUG_CHECKS
+#ifdef WITH_MPI
+
+          /* Let's cross-check that we had a proxy for that cell */
+          if (ci->nodeID == nodeID && cj->nodeID != engine_rank) {
+
+            /* Find the proxy for this node */
+            const int proxy_id = e->proxy_ind[cj->nodeID];
+            if (proxy_id < 0)
+              error("No proxy exists for that foreign node %d!", cj->nodeID);
+
+            const struct proxy *p = &e->proxies[proxy_id];
+
+            /* Check whether the cell exists in the proxy */
+            int n = 0;
+            for (n = 0; n < p->nr_cells_in; n++)
+              if (p->cells_in[n] == cj) break;
+            if (n == p->nr_cells_in)
+              error(
+                  "Cell %d not found in the proxy but trying to construct "
+                  "hydro task!",
+                  cjd);
+          } else if (cj->nodeID == nodeID && ci->nodeID != engine_rank) {
+
+            /* Find the proxy for this node */
+            const int proxy_id = e->proxy_ind[ci->nodeID];
+            if (proxy_id < 0)
+              error("No proxy exists for that foreign node %d!", ci->nodeID);
+
+            const struct proxy *p = &e->proxies[proxy_id];
+
+            /* Check whether the cell exists in the proxy */
+            int n = 0;
+            for (n = 0; n < p->nr_cells_in; n++)
+              if (p->cells_in[n] == ci) break;
+            if (n == p->nr_cells_in)
+              error(
+                  "Cell %d not found in the proxy but trying to construct "
+                  "hydro task!",
+                  cid);
+          }
+#endif /* WITH_MPI */
+#endif /* SWIFT_DEBUG_CHECKS */
+        }
+      }
+    }
+  }
+}
+
+/* A pair of cells plus the proxy connection type(s) between them; used as
+ * the per-element payload for the MPI send/recv task mappers below. */
+struct cell_type_pair {
+  /* ci: the cell the tasks act on; cj: the first cell of the matching
+   * proxy on the other side (only read on the send side). */
+  struct cell *ci, *cj;
+  /* Bit-mask of proxy_cell_type_* flags (hydro and/or gravity). */
+  int type;
+};
+
+/**
+ * @brief Threadpool mapper creating the MPI send tasks for a list of
+ * #cell_type_pair entries taken from the proxies.
+ *
+ * For each entry, adds the time-step send task plus, depending on the
+ * engine policy and the proxy connection type, the hydro, stars and
+ * gravity send tasks.
+ *
+ * @param map_data Array of #cell_type_pair entries to process.
+ * @param num_elements Number of elements in @c map_data.
+ * @param extra_data The #engine.
+ */
+void engine_addtasks_send_mapper(void *map_data, int num_elements,
+                                 void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  const int with_limiter = (e->policy & engine_policy_limiter);
+  struct cell_type_pair *cell_type_pairs = (struct cell_type_pair *)map_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    struct cell *ci = cell_type_pairs[k].ci;
+    struct cell *cj = cell_type_pairs[k].cj;
+    const int type = cell_type_pairs[k].type;
+
+    /* Add the send task for the particle timesteps. */
+    engine_addtasks_send_timestep(e, ci, cj, NULL, NULL, with_limiter);
+
+    /* Add the send tasks for the cells in the proxy that have a hydro
+     * connection. */
+    if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
+      engine_addtasks_send_hydro(e, ci, cj, /*t_xv=*/NULL,
+                                 /*t_rho=*/NULL, /*t_gradient=*/NULL);
+
+    /* Add the send tasks for the cells in the proxy that have a stars
+     * connection. Note that star data travels over the hydro proxy type:
+     * the gate below tests proxy_cell_type_hydro, not a stars type. */
+    if ((e->policy & engine_policy_feedback) && (type & proxy_cell_type_hydro))
+      engine_addtasks_send_stars(e, ci, cj, /*t_feedback=*/NULL);
+
+    /* Add the send tasks for the cells in the proxy that have a gravity
+     * connection. */
+    if ((e->policy & engine_policy_self_gravity) &&
+        (type & proxy_cell_type_gravity))
+      engine_addtasks_send_gravity(e, ci, cj, NULL);
+  }
+}
+
+/**
+ * @brief Threadpool mapper creating the MPI recv tasks for a list of
+ * #cell_type_pair entries taken from the proxies.
+ *
+ * Receiving-side counterpart of engine_addtasks_send_mapper(); the cj
+ * member of each pair is not used here.
+ *
+ * @param map_data Array of #cell_type_pair entries to process.
+ * @param num_elements Number of elements in @c map_data.
+ * @param extra_data The #engine.
+ */
+void engine_addtasks_recv_mapper(void *map_data, int num_elements,
+                                 void *extra_data) {
+
+  struct engine *e = (struct engine *)extra_data;
+  const int with_limiter = (e->policy & engine_policy_limiter);
+  struct cell_type_pair *cell_type_pairs = (struct cell_type_pair *)map_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    struct cell *ci = cell_type_pairs[k].ci;
+    const int type = cell_type_pairs[k].type;
+
+    /* Add the recv task for the particle timesteps. */
+    engine_addtasks_recv_timestep(e, ci, NULL, NULL, with_limiter);
+
+    /* Add the recv tasks for the cells in the proxy that have a hydro
+     * connection. */
+    if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
+      engine_addtasks_recv_hydro(e, ci, NULL, NULL, NULL);
+
+    /* Add the recv tasks for the cells in the proxy that have a stars
+     * connection. Note that star data travels over the hydro proxy type:
+     * the gate below tests proxy_cell_type_hydro, not a stars type. */
+    if ((e->policy & engine_policy_feedback) && (type & proxy_cell_type_hydro))
+      engine_addtasks_recv_stars(e, ci, NULL);
+
+    /* Add the recv tasks for the cells in the proxy that have a gravity
+     * connection. */
+    if ((e->policy & engine_policy_self_gravity) &&
+        (type & proxy_cell_type_gravity))
+      engine_addtasks_recv_gravity(e, ci, NULL);
+  }
+}
+
+/**
+ * @brief Fill the #space's task list.
+ *
+ * Builds the complete task graph for the current cell hierarchy: the
+ * hydro and gravity interaction tasks, task splitting, the cell-task
+ * links, the super-pointers, the hierarchical and extra hydro-loop
+ * tasks, the gravity dependencies, the MPI send/recv tasks (when
+ * running over MPI) and, finally, the scheduler's unlock lists, task
+ * ranks and weights.
+ *
+ * @param e The #engine we are working with.
+ */
+void engine_maketasks(struct engine *e) {
+
+  struct space *s = e->s;
+  struct scheduler *sched = &e->sched;
+  struct cell *cells = s->cells_top;
+  const int nr_cells = s->nr_cells;
+  const ticks tic = getticks();
+
+  /* Re-set the scheduler. */
+  scheduler_reset(sched, engine_estimate_nr_tasks(e));
+
+  ticks tic2 = getticks();
+
+  /* Construct the first hydro loop over neighbours */
+  if (e->policy & engine_policy_hydro)
+    threadpool_map(&e->threadpool, engine_make_hydroloop_tasks_mapper, NULL,
+                   s->nr_cells, 1, 0, e);
+
+  if (e->verbose)
+    message("Making hydro tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  tic2 = getticks();
+
+  /* Add the self gravity tasks. */
+  if (e->policy & engine_policy_self_gravity) {
+    threadpool_map(&e->threadpool, engine_make_self_gravity_tasks_mapper, NULL,
+                   s->nr_cells, 1, 0, e);
+  }
+
+  if (e->verbose)
+    message("Making gravity tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  /* Add the external gravity tasks. */
+  if (e->policy & engine_policy_external_gravity)
+    engine_make_external_gravity_tasks(e);
+
+  /* Sanity check: particles exist but no tasks at all were created. */
+  if (e->sched.nr_tasks == 0 && (s->nr_gparts > 0 || s->nr_parts > 0))
+    error("We have particles but no hydro or gravity tasks were created.");
+
+  tic2 = getticks();
+
+  /* Split the tasks. */
+  scheduler_splittasks(sched);
+
+  if (e->verbose)
+    message("Splitting tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Verify that we are not left with invalid tasks */
+  for (int i = 0; i < e->sched.nr_tasks; ++i) {
+    const struct task *t = &e->sched.tasks[i];
+    if (t->ci == NULL && t->cj != NULL && !t->skip) error("Invalid task");
+  }
+#endif
+
+  /* Free the old list of cell-task links. */
+  if (e->links != NULL) free(e->links);
+  /* First estimate of the number of links, based on the task count. */
+  e->size_links = e->sched.nr_tasks * e->links_per_tasks;
+
+  /* Make sure that we have space for more links than last time. */
+  if (e->size_links < e->nr_links * engine_rebuild_link_alloc_margin)
+    e->size_links = e->nr_links * engine_rebuild_link_alloc_margin;
+
+  /* Allocate the new link list */
+  if ((e->links = (struct link *)malloc(sizeof(struct link) * e->size_links)) ==
+      NULL)
+    error("Failed to allocate cell-task links.");
+  e->nr_links = 0;
+
+  tic2 = getticks();
+
+  /* Count the number of tasks associated with each cell and
+     store the density tasks in each cell, and make each sort
+     depend on the sorts of its progeny. */
+  threadpool_map(&e->threadpool, engine_count_and_link_tasks_mapper,
+                 sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
+
+  if (e->verbose)
+    message("Counting and linking tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  tic2 = getticks();
+
+  /* Re-set the tag counter. MPI tags are defined for top-level cells in
+   * cell_set_super_mapper. */
+#ifdef WITH_MPI
+  cell_next_tag = 0;
+#endif
+
+  /* Now that the self/pair tasks are at the right level, set the super
+   * pointers. */
+  threadpool_map(&e->threadpool, cell_set_super_mapper, cells, nr_cells,
+                 sizeof(struct cell), 0, e);
+
+  if (e->verbose)
+    message("Setting super-pointers took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  /* Append hierarchical tasks to each cell. */
+  threadpool_map(&e->threadpool, engine_make_hierarchical_tasks_mapper, cells,
+                 nr_cells, sizeof(struct cell), 0, e);
+
+  tic2 = getticks();
+
+  /* Run through the tasks and make force tasks for each density task.
+     Each force task depends on the cell ghosts and unlocks the kick task
+     of its super-cell. */
+  if (e->policy & engine_policy_hydro)
+    threadpool_map(&e->threadpool, engine_make_extra_hydroloop_tasks_mapper,
+                   sched->tasks, sched->nr_tasks, sizeof(struct task), 0, e);
+
+  if (e->verbose)
+    message("Making extra hydroloop tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  tic2 = getticks();
+
+  /* Add the dependencies for the gravity stuff */
+  if (e->policy & (engine_policy_self_gravity | engine_policy_external_gravity))
+    engine_link_gravity_tasks(e);
+
+  if (e->verbose)
+    message("Linking gravity tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  tic2 = getticks();
+
+#ifdef WITH_MPI
+  /* Add the communication tasks if MPI is being used. */
+  if (e->policy & engine_policy_mpi) {
+
+    tic2 = getticks();
+
+    /* Loop over the proxies and add the send tasks, which also generates the
+     * cell tags for super-cells. */
+    int max_num_send_cells = 0;
+    for (int pid = 0; pid < e->nr_proxies; pid++)
+      max_num_send_cells += e->proxies[pid].nr_cells_out;
+    struct cell_type_pair *send_cell_type_pairs = NULL;
+    if ((send_cell_type_pairs = (struct cell_type_pair *)malloc(
+             sizeof(struct cell_type_pair) * max_num_send_cells)) == NULL)
+      error("Failed to allocate temporary cell pointer list.");
+    int num_send_cells = 0;
+
+    for (int pid = 0; pid < e->nr_proxies; pid++) {
+
+      /* Get a handle on the proxy. */
+      struct proxy *p = &e->proxies[pid];
+
+      for (int k = 0; k < p->nr_cells_out; k++) {
+        send_cell_type_pairs[num_send_cells].ci = p->cells_out[k];
+        send_cell_type_pairs[num_send_cells].cj = p->cells_in[0];
+        send_cell_type_pairs[num_send_cells++].type = p->cells_out_type[k];
+      }
+    }
+
+    threadpool_map(&e->threadpool, engine_addtasks_send_mapper,
+                   send_cell_type_pairs, num_send_cells,
+                   sizeof(struct cell_type_pair),
+                   /*chunk=*/0, e);
+
+    free(send_cell_type_pairs);
+
+    if (e->verbose)
+      message("Creating send tasks took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+    tic2 = getticks();
+
+    /* Exchange the cell tags. */
+    proxy_tags_exchange(e->proxies, e->nr_proxies, s);
+
+    if (e->verbose)
+      message("Exchanging cell tags took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+    tic2 = getticks();
+
+    /* Loop over the proxies and add the recv tasks, which relies on having the
+     * cell tags. */
+    int max_num_recv_cells = 0;
+    for (int pid = 0; pid < e->nr_proxies; pid++)
+      max_num_recv_cells += e->proxies[pid].nr_cells_in;
+    struct cell_type_pair *recv_cell_type_pairs = NULL;
+    if ((recv_cell_type_pairs = (struct cell_type_pair *)malloc(
+             sizeof(struct cell_type_pair) * max_num_recv_cells)) == NULL)
+      error("Failed to allocate temporary cell pointer list.");
+    int num_recv_cells = 0;
+    for (int pid = 0; pid < e->nr_proxies; pid++) {
+
+      /* Get a handle on the proxy. */
+      struct proxy *p = &e->proxies[pid];
+      for (int k = 0; k < p->nr_cells_in; k++) {
+        recv_cell_type_pairs[num_recv_cells].ci = p->cells_in[k];
+        recv_cell_type_pairs[num_recv_cells++].type = p->cells_in_type[k];
+      }
+    }
+    threadpool_map(&e->threadpool, engine_addtasks_recv_mapper,
+                   recv_cell_type_pairs, num_recv_cells,
+                   sizeof(struct cell_type_pair),
+                   /*chunk=*/0, e);
+    free(recv_cell_type_pairs);
+
+    if (e->verbose)
+      message("Creating recv tasks took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
+  }
+
+  /* Allocate memory for foreign particles */
+  engine_allocate_foreign_particles(e);
+
+#endif
+
+  /* Report the number of tasks we actually used */
+  if (e->verbose)
+    message(
+        "Nr. of tasks: %d allocated tasks: %d ratio: %f memory use: %zd MB.",
+        e->sched.nr_tasks, e->sched.size,
+        (float)e->sched.nr_tasks / (float)e->sched.size,
+        e->sched.size * sizeof(struct task) / (1024 * 1024));
+
+  /* Report the number of links we actually used */
+  if (e->verbose)
+    message(
+        "Nr. of links: %zd allocated links: %zd ratio: %f memory use: %zd MB.",
+        e->nr_links, e->size_links, (float)e->nr_links / (float)e->size_links,
+        e->size_links * sizeof(struct link) / (1024 * 1024));
+
+  tic2 = getticks();
+
+  /* Set the unlocks per task. */
+  scheduler_set_unlocks(sched);
+
+  if (e->verbose)
+    message("Setting unlocks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  tic2 = getticks();
+
+  /* Rank the tasks. */
+  scheduler_ranktasks(sched);
+
+  if (e->verbose)
+    message("Ranking the tasks took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
+  /* Weight the tasks. */
+  scheduler_reweight(sched, e->verbose);
+
+  /* Set the tasks age. */
+  e->tasks_age = 0;
+
+  if (e->verbose)
+    message("took %.3f %s (including reweight).",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+}
diff --git a/src/engine_marktasks.c b/src/engine_marktasks.c
new file mode 100644
index 0000000000000000000000000000000000000000..c02eb5d2bd272111808701269faed07cef505449
--- /dev/null
+++ b/src/engine_marktasks.c
@@ -0,0 +1,679 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
+ *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2015 Peter W. Draper (p.w.draper@durham.ac.uk)
+ *                    Angus Lepper (angus.lepper@ed.ac.uk)
+ *               2016 John A. Regan (john.a.regan@durham.ac.uk)
+ *                    Tom Theuns (tom.theuns@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <stdlib.h>
+#include <unistd.h>
+
+/* MPI headers. */
+#ifdef WITH_MPI
+#include <mpi.h>
+#endif
+
+/* Load the profiler header, if needed. */
+#ifdef WITH_PROFILER
+#include <gperftools/profiler.h>
+#endif
+
+/* This object's header. */
+#include "engine.h"
+
+/* Local headers. */
+#include "active.h"
+#include "atomic.h"
+#include "cell.h"
+#include "clocks.h"
+#include "cycle.h"
+#include "debug.h"
+#include "error.h"
+#include "proxy.h"
+#include "timers.h"
+
+/**
+ * @brief Mark tasks to be un-skipped and set the sort flags accordingly.
+ *        Threadpool mapper function.
+ *
+ * @param map_data pointer to the tasks
+ * @param num_elements number of tasks
+ * @param extra_data pointer to int that will define if a rebuild is needed.
+ */
+void engine_marktasks_mapper(void *map_data, int num_elements,
+                             void *extra_data) {
+  /* Unpack the arguments. */
+  struct task *tasks = (struct task *)map_data;
+  size_t *rebuild_space = &((size_t *)extra_data)[1];
+  struct scheduler *s = (struct scheduler *)(((size_t *)extra_data)[2]);
+  struct engine *e = (struct engine *)((size_t *)extra_data)[0];
+  const int nodeID = e->nodeID;
+  const int with_limiter = e->policy & engine_policy_limiter;
+
+  for (int ind = 0; ind < num_elements; ind++) {
+
+    /* Get basic task information */
+    struct task *t = &tasks[ind];
+    const enum task_types t_type = t->type;
+    const enum task_subtypes t_subtype = t->subtype;
+
+    /* Single-cell task? */
+    if (t_type == task_type_self || t_type == task_type_sub_self) {
+
+      /* Local pointer. */
+      struct cell *ci = t->ci;
+
+      if (ci->nodeID != nodeID) error("Non-local self task found");
+
+      /* Activate the hydro drift */
+      if (t_type == task_type_self && t_subtype == task_subtype_density) {
+        if (cell_is_active_hydro(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_drift_part(ci, s);
+          if (with_limiter) cell_activate_limiter(ci, s);
+        }
+      }
+
+      /* Store current values of dx_max and h_max. */
+      else if (t_type == task_type_sub_self &&
+               t_subtype == task_subtype_density) {
+        if (cell_is_active_hydro(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_subcell_hydro_tasks(ci, NULL, s);
+          if (with_limiter) cell_activate_limiter(ci, s);
+        }
+      }
+
+      else if (t_type == task_type_self && t_subtype == task_subtype_force) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      else if (t_type == task_type_sub_self &&
+               t_subtype == task_subtype_force) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      else if (t->type == task_type_self &&
+               t->subtype == task_subtype_limiter) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      else if (t->type == task_type_sub_self &&
+               t->subtype == task_subtype_limiter) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      else if (t_type == task_type_self && t_subtype == task_subtype_gradient) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      else if (t_type == task_type_sub_self &&
+               t_subtype == task_subtype_gradient) {
+        if (cell_is_active_hydro(ci, e)) scheduler_activate(s, t);
+      }
+
+      /* Activate the star density */
+      else if (t_type == task_type_self &&
+               t_subtype == task_subtype_stars_density) {
+        if (cell_is_active_stars(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_drift_part(ci, s);
+          cell_activate_drift_spart(ci, s);
+        }
+      }
+
+      /* Store current values of dx_max and h_max. */
+      else if (t_type == task_type_sub_self &&
+               t_subtype == task_subtype_stars_density) {
+        if (cell_is_active_stars(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_subcell_stars_tasks(ci, NULL, s);
+        }
+      }
+
+      else if (t_type == task_type_self &&
+               t_subtype == task_subtype_stars_feedback) {
+        if (cell_is_active_stars(ci, e)) {
+          scheduler_activate(s, t);
+        }
+      }
+
+      else if (t_type == task_type_sub_self &&
+               t_subtype == task_subtype_stars_feedback) {
+        if (cell_is_active_stars(ci, e)) scheduler_activate(s, t);
+      }
+
+      /* Activate the gravity drift */
+      else if (t_type == task_type_self && t_subtype == task_subtype_grav) {
+        if (cell_is_active_gravity(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_subcell_grav_tasks(t->ci, NULL, s);
+        }
+      }
+
+      /* Activate the gravity drift */
+      else if (t_type == task_type_self &&
+               t_subtype == task_subtype_external_grav) {
+        if (cell_is_active_gravity(ci, e)) {
+          scheduler_activate(s, t);
+          cell_activate_drift_gpart(t->ci, s);
+        }
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      else {
+        error("Invalid task type / sub-type encountered");
+      }
+#endif
+    }
+
+    /* Pair? */
+    else if (t_type == task_type_pair || t_type == task_type_sub_pair) {
+
+      /* Local pointers. */
+      struct cell *ci = t->ci;
+      struct cell *cj = t->cj;
+#ifdef WITH_MPI
+      const int ci_nodeID = ci->nodeID;
+      const int cj_nodeID = cj->nodeID;
+#else
+      const int ci_nodeID = nodeID;
+      const int cj_nodeID = nodeID;
+#endif
+      const int ci_active_hydro = cell_is_active_hydro(ci, e);
+      const int cj_active_hydro = cell_is_active_hydro(cj, e);
+
+      const int ci_active_gravity = cell_is_active_gravity(ci, e);
+      const int cj_active_gravity = cell_is_active_gravity(cj, e);
+
+      const int ci_active_stars = cell_is_active_stars(ci, e);
+      const int cj_active_stars = cell_is_active_stars(cj, e);
+
+      /* Only activate tasks that involve a local active cell. */
+      if ((t_subtype == task_subtype_density ||
+           t_subtype == task_subtype_gradient ||
+           t_subtype == task_subtype_limiter ||
+           t_subtype == task_subtype_force) &&
+          ((ci_active_hydro && ci_nodeID == nodeID) ||
+           (cj_active_hydro && cj_nodeID == nodeID))) {
+
+        scheduler_activate(s, t);
+
+        /* Set the correct sorting flags */
+        if (t_type == task_type_pair && t_subtype == task_subtype_density) {
+
+          /* Store some values. */
+          atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
+          atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
+          ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+          cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
+
+          /* Activate the hydro drift tasks. */
+          if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+          if (cj_nodeID == nodeID) cell_activate_drift_part(cj, s);
+
+          /* And the limiter */
+          if (ci_nodeID == nodeID && with_limiter) cell_activate_limiter(ci, s);
+          if (cj_nodeID == nodeID && with_limiter) cell_activate_limiter(cj, s);
+
+          /* Check the sorts and activate them if needed. */
+          cell_activate_hydro_sorts(ci, t->flags, s);
+          cell_activate_hydro_sorts(cj, t->flags, s);
+
+        }
+
+        /* Store current values of dx_max and h_max. */
+        else if (t_type == task_type_sub_pair &&
+                 t_subtype == task_subtype_density) {
+          cell_activate_subcell_hydro_tasks(t->ci, t->cj, s);
+        }
+      }
+
+      /* Stars density */
+      else if ((t_subtype == task_subtype_stars_density) &&
+               (ci_active_stars || cj_active_stars) &&
+               (ci_nodeID == nodeID || cj_nodeID == nodeID)) {
+
+        scheduler_activate(s, t);
+
+        /* Set the correct sorting flags */
+        if (t_type == task_type_pair) {
+
+          /* Do ci */
+          if (ci_active_stars) {
+
+            /* stars for ci */
+            atomic_or(&ci->stars.requires_sorts, 1 << t->flags);
+            ci->stars.dx_max_sort_old = ci->stars.dx_max_sort;
+
+            /* hydro for cj */
+            atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
+            cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
+
+            /* Activate the drift tasks. */
+            if (ci_nodeID == nodeID) cell_activate_drift_spart(ci, s);
+            if (cj_nodeID == nodeID) cell_activate_drift_part(cj, s);
+
+            /* Check the sorts and activate them if needed. */
+            cell_activate_hydro_sorts(cj, t->flags, s);
+            cell_activate_stars_sorts(ci, t->flags, s);
+          }
+
+          /* Do cj */
+          if (cj_active_stars) {
+
+            /* hydro for ci */
+            atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
+            ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+
+            /* stars for cj */
+            atomic_or(&cj->stars.requires_sorts, 1 << t->flags);
+            cj->stars.dx_max_sort_old = cj->stars.dx_max_sort;
+
+            /* Activate the drift tasks. */
+            if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+            if (cj_nodeID == nodeID) cell_activate_drift_spart(cj, s);
+
+            /* Check the sorts and activate them if needed. */
+            cell_activate_hydro_sorts(ci, t->flags, s);
+            cell_activate_stars_sorts(cj, t->flags, s);
+          }
+        }
+
+        /* Store current values of dx_max and h_max. */
+        else if (t_type == task_type_sub_pair &&
+                 t_subtype == task_subtype_stars_density) {
+          cell_activate_subcell_stars_tasks(ci, cj, s);
+        }
+      }
+
+      /* Stars feedback */
+      else if ((t_subtype == task_subtype_stars_feedback) &&
+               ((ci_active_stars && ci_nodeID == nodeID) ||
+                (cj_active_stars && cj_nodeID == nodeID))) {
+
+        scheduler_activate(s, t);
+      }
+
+      /* Gravity */
+      else if ((t_subtype == task_subtype_grav) &&
+               ((ci_active_gravity && ci_nodeID == nodeID) ||
+                (cj_active_gravity && cj_nodeID == nodeID))) {
+
+        scheduler_activate(s, t);
+
+        if (t_type == task_type_pair && t_subtype == task_subtype_grav) {
+          /* Activate the gravity drift */
+          cell_activate_subcell_grav_tasks(t->ci, t->cj, s);
+        }
+
+#ifdef SWIFT_DEBUG_CHECKS
+        else if (t_type == task_type_sub_pair &&
+                 t_subtype == task_subtype_grav) {
+          error("Invalid task sub-type encountered");
+        }
+#endif
+      }
+
+      /* Only interested in density tasks as of here. */
+      if (t_subtype == task_subtype_density) {
+
+        /* Too much particle movement? */
+        if (cell_need_rebuild_for_hydro_pair(ci, cj)) *rebuild_space = 1;
+
+#ifdef WITH_MPI
+        /* Activate the send/recv tasks. */
+        if (ci_nodeID != nodeID) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (cj_active_hydro) {
+            scheduler_activate(s, ci->mpi.hydro.recv_xv);
+            if (ci_active_hydro) {
+              scheduler_activate(s, ci->mpi.hydro.recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+              scheduler_activate(s, ci->mpi.hydro.recv_gradient);
+#endif
+            }
+          }
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (ci_active_hydro) scheduler_activate(s, ci->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (ci_active_hydro) {
+            struct link *l =
+                scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_part(l->t->ci, s);
+
+            /* If the local cell is also active, more stuff will be needed. */
+            if (cj_active_hydro) {
+              scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
+
+#ifdef EXTRA_HYDRO_LOOP
+              scheduler_activate_send(s, cj->mpi.hydro.send_gradient,
+                                      ci_nodeID);
+#endif
+            }
+          }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (cj_active_hydro)
+            scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+
+        } else if (cj_nodeID != nodeID) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (ci_active_hydro) {
+
+            scheduler_activate(s, cj->mpi.hydro.recv_xv);
+            if (cj_active_hydro) {
+              scheduler_activate(s, cj->mpi.hydro.recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+              scheduler_activate(s, cj->mpi.hydro.recv_gradient);
+#endif
+            }
+          }
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (cj_active_hydro) scheduler_activate(s, cj->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (cj_active_hydro) {
+
+            struct link *l =
+                scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_part(l->t->ci, s);
+
+            /* If the local cell is also active, more stuff will be needed. */
+            if (ci_active_hydro) {
+
+              scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
+
+#ifdef EXTRA_HYDRO_LOOP
+              scheduler_activate_send(s, ci->mpi.hydro.send_gradient,
+                                      cj_nodeID);
+#endif
+            }
+          }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (ci_active_hydro)
+            scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+        }
+#endif
+      }
+
+      /* Only interested in stars_density tasks as of here. */
+      else if (t->subtype == task_subtype_stars_density) {
+
+        /* Too much particle movement? */
+        if (cell_need_rebuild_for_stars_pair(ci, cj)) *rebuild_space = 1;
+        if (cell_need_rebuild_for_stars_pair(cj, ci)) *rebuild_space = 1;
+
+#ifdef WITH_MPI
+        /* Activate the send/recv tasks. */
+        if (ci_nodeID != nodeID) {
+
+          if (cj_active_stars) {
+            scheduler_activate(s, ci->mpi.hydro.recv_xv);
+            scheduler_activate(s, ci->mpi.hydro.recv_rho);
+
+            /* If the local cell is active, more stuff will be needed. */
+            scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+            cell_activate_drift_spart(cj, s);
+
+            /* If the local cell is active, send its ti_end values. */
+            scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+          }
+
+          if (ci_active_stars) {
+            scheduler_activate(s, ci->mpi.stars.recv);
+
+            /* If the foreign cell is active, we want its ti_end values. */
+            scheduler_activate(s, ci->mpi.recv_ti);
+
+            /* Is the foreign cell active and will need stuff from us? */
+            scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
+            scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
+
+            /* Drift the cell which will be sent; note that not all sent
+               particles will be drifted, only those that are needed. */
+            cell_activate_drift_part(cj, s);
+          }
+
+        } else if (cj_nodeID != nodeID) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (ci_active_stars) {
+            scheduler_activate(s, cj->mpi.hydro.recv_xv);
+            scheduler_activate(s, cj->mpi.hydro.recv_rho);
+
+            /* If the local cell is active, more stuff will be needed. */
+            scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+            cell_activate_drift_spart(ci, s);
+
+            /* If the local cell is active, send its ti_end values. */
+            scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+          }
+
+          if (cj_active_stars) {
+            scheduler_activate(s, cj->mpi.stars.recv);
+
+            /* If the foreign cell is active, we want its ti_end values. */
+            scheduler_activate(s, cj->mpi.recv_ti);
+
+            /* Is the foreign cell active and will need stuff from us? */
+            scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
+            scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
+
+            /* Drift the cell which will be sent; note that not all sent
+               particles will be drifted, only those that are needed. */
+            cell_activate_drift_part(ci, s);
+          }
+        }
+#endif
+      }
+
+      /* Only interested in gravity tasks as of here. */
+      else if (t_subtype == task_subtype_grav) {
+
+#ifdef WITH_MPI
+        /* Activate the send/recv tasks. */
+        if (ci_nodeID != nodeID) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (cj_active_gravity) scheduler_activate(s, ci->mpi.grav.recv);
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (ci_active_gravity) scheduler_activate(s, ci->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (ci_active_gravity) {
+
+            struct link *l =
+                scheduler_activate_send(s, cj->mpi.grav.send, ci_nodeID);
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_gpart(l->t->ci, s);
+          }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (cj_active_gravity)
+            scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+
+        } else if (cj_nodeID != nodeID) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (ci_active_gravity) scheduler_activate(s, cj->mpi.grav.recv);
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (cj_active_gravity) scheduler_activate(s, cj->mpi.recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (cj_active_gravity) {
+
+            struct link *l =
+                scheduler_activate_send(s, ci->mpi.grav.send, cj_nodeID);
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_gpart(l->t->ci, s);
+          }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (ci_active_gravity)
+            scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+        }
+#endif
+      }
+    }
+
+    /* End force for hydro ? */
+    else if (t_type == task_type_end_hydro_force) {
+
+      if (cell_is_active_hydro(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* End force for gravity ? */
+    else if (t_type == task_type_end_grav_force) {
+
+      if (cell_is_active_gravity(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* Kick ? */
+    else if (t_type == task_type_kick1 || t_type == task_type_kick2) {
+
+      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
+        scheduler_activate(s, t);
+    }
+
+    /* Hydro ghost tasks ? */
+    else if (t_type == task_type_ghost || t_type == task_type_extra_ghost ||
+             t_type == task_type_ghost_in || t_type == task_type_ghost_out) {
+      if (cell_is_active_hydro(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* logger tasks ? */
+    else if (t->type == task_type_logger) {
+      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e) ||
+          cell_is_active_stars(t->ci, e))
+        scheduler_activate(s, t);
+    }
+
+    /* Gravity stuff ? */
+    else if (t_type == task_type_grav_down || t_type == task_type_grav_mesh ||
+             t_type == task_type_grav_long_range ||
+             t_type == task_type_init_grav ||
+             t_type == task_type_init_grav_out ||
+             t_type == task_type_drift_gpart_out ||
+             t_type == task_type_grav_down_in) {
+      if (cell_is_active_gravity(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* Multipole - Multipole interaction task */
+    else if (t_type == task_type_grav_mm) {
+
+      /* Local pointers. */
+      const struct cell *ci = t->ci;
+      const struct cell *cj = t->cj;
+#ifdef WITH_MPI
+      const int ci_nodeID = ci->nodeID;
+      const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
+#else
+      const int ci_nodeID = nodeID;
+      const int cj_nodeID = nodeID;
+#endif
+      const int ci_active_gravity = cell_is_active_gravity_mm(ci, e);
+      const int cj_active_gravity = cell_is_active_gravity_mm(cj, e);
+
+      if ((ci_active_gravity && ci_nodeID == nodeID) ||
+          (cj_active_gravity && cj_nodeID == nodeID))
+        scheduler_activate(s, t);
+    }
+
+    /* Star ghost tasks ? */
+    else if (t_type == task_type_stars_ghost) {
+      if (cell_is_active_stars(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* Feedback implicit tasks? */
+    else if (t_type == task_type_stars_in || t_type == task_type_stars_out) {
+      if (cell_is_active_stars(t->ci, e)) scheduler_activate(s, t);
+    }
+
+    /* Time-step? */
+    else if (t_type == task_type_timestep) {
+      t->ci->hydro.updated = 0;
+      t->ci->grav.updated = 0;
+      t->ci->stars.updated = 0;
+      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
+        scheduler_activate(s, t);
+    }
+
+    /* Subgrid tasks */
+    else if (t_type == task_type_cooling) {
+      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
+        scheduler_activate(s, t);
+    } else if (t_type == task_type_star_formation) {
+      if (cell_is_active_hydro(t->ci, e) || cell_is_active_gravity(t->ci, e))
+        scheduler_activate(s, t);
+    }
+  }
+}
+
+/**
+ * @brief Mark tasks to be un-skipped and set the sort flags accordingly.
+ *
+ * @return 1 if the space has to be rebuilt, 0 otherwise.
+ */
+int engine_marktasks(struct engine *e) {
+
+  struct scheduler *s = &e->sched;
+  const ticks tic = getticks();
+  int rebuild_space = 0;
+
+  /* Run through the tasks and mark as skip or not. */
+  size_t extra_data[3] = {(size_t)e, (size_t)rebuild_space, (size_t)&e->sched};
+  threadpool_map(&e->threadpool, engine_marktasks_mapper, s->tasks, s->nr_tasks,
+                 sizeof(struct task), 0, extra_data);
+  rebuild_space = extra_data[1];
+
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+
+  /* All is well... */
+  return rebuild_space;
+}
diff --git a/src/entropy_floor.h b/src/entropy_floor.h
new file mode 100644
index 0000000000000000000000000000000000000000..9f2e97ccc815bee1087884d060492ff3715f1c6f
--- /dev/null
+++ b/src/entropy_floor.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ENTROPY_FLOOR_H
+#define SWIFT_ENTROPY_FLOOR_H
+
+/**
+ * @file src/entropy_floor.h
+ * @brief Branches between the different entropy floor models
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+#include "common_io.h"
+#include "error.h"
+#include "inline.h"
+
+/* Import the right entropy floor definition */
+#if defined(ENTROPY_FLOOR_NONE)
+#include "./entropy_floor/none/entropy_floor.h"
+#elif defined(ENTROPY_FLOOR_EAGLE)
+#include "./entropy_floor/EAGLE/entropy_floor.h"
+#endif
+
+#endif /* SWIFT_ENTROPY_FLOOR_H */
diff --git a/src/entropy_floor/EAGLE/entropy_floor.h b/src/entropy_floor/EAGLE/entropy_floor.h
new file mode 100644
index 0000000000000000000000000000000000000000..41d35fa0484cc1ee491a3c6293893ad5d2b5583f
--- /dev/null
+++ b/src/entropy_floor/EAGLE/entropy_floor.h
@@ -0,0 +1,281 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ENTROPY_FLOOR_EAGLE_H
+#define SWIFT_ENTROPY_FLOOR_EAGLE_H
+
+#include "adiabatic_index.h"
+#include "cosmology.h"
+#include "hydro.h"
+#include "hydro_properties.h"
+#include "parser.h"
+#include "units.h"
+
+/**
+ * @file src/entropy_floor/EAGLE/entropy_floor.h
+ * @brief Entropy floor used in the EAGLE model
+ */
+
+/**
+ * @brief Properties of the entropy floor in the EAGLE model.
+ */
+struct entropy_floor_properties {
+
+  /*! Density threshold for the Jeans floor in Hydrogen atoms per cubic cm */
+  float Jeans_density_threshold_H_p_cm3;
+
+  /*! Density threshold for the Jeans floor in internal units */
+  float Jeans_density_threshold;
+
+  /*! Inverse of the density threshold for the Jeans floor in internal units */
+  float Jeans_density_threshold_inv;
+
+  /*! Over-density threshold for the Jeans floor */
+  float Jeans_over_density_threshold;
+
+  /*! Slope of the Jeans floor power-law */
+  float Jeans_gamma_effective;
+
+  /*! Temperature of the Jeans floor at the density threshold in Kelvin */
+  float Jeans_temperature_norm_K;
+
+  /*! Temperature of the Jeans floor at the density thresh. in internal units */
+  float Jeans_temperature_norm;
+
+  /*! Pressure of the Jeans floor at the density thresh. in internal units */
+  float Jeans_pressure_norm;
+
+  /*! Density threshold for the Cool floor in Hydrogen atoms per cubic cm */
+  float Cool_density_threshold_H_p_cm3;
+
+  /*! Density threshold for the Cool floor in internal units */
+  float Cool_density_threshold;
+
+  /*! Inverse of the density threshold for the Cool floor in internal units */
+  float Cool_density_threshold_inv;
+
+  /*! Over-density threshold for the Cool floor */
+  float Cool_over_density_threshold;
+
+  /*! Slope of the Cool floor power-law */
+  float Cool_gamma_effective;
+
+  /*! Temperature of the Cool floor at the density threshold in Kelvin */
+  float Cool_temperature_norm_K;
+
+  /*! Temperature of the Cool floor at the density thresh. in internal units */
+  float Cool_temperature_norm;
+
+  /*! Pressure of the Cool floor at the density thresh. in internal units */
+  float Cool_pressure_norm;
+};
+
+/**
+ * @brief Compute the entropy floor of a given #part.
+ *
+ * Note that the particle is not updated!!
+ *
+ * @param p The #part.
+ * @param cosmo The cosmological model.
+ * @param props The properties of the entropy floor.
+ */
+static INLINE float entropy_floor(
+    const struct part *p, const struct cosmology *cosmo,
+    const struct entropy_floor_properties *props) {
+
+  /* Physical density in internal units */
+  const float rho = hydro_get_physical_density(p, cosmo);
+
+  /* Critical density at this redshift.
+   * Recall that this is 0 in a non-cosmological run */
+  const float rho_crit = cosmo->critical_density;
+  const float rho_crit_baryon = cosmo->Omega_b * rho_crit;
+
+  /* Physical pressure */
+  float pressure = 0.f;
+
+  /* Are we in the regime of the Jeans equation of state? */
+  if ((rho >= rho_crit_baryon * props->Jeans_over_density_threshold) &&
+      (rho >= props->Jeans_density_threshold)) {
+
+    const float pressure_Jeans = props->Jeans_pressure_norm *
+                                 powf(rho * props->Jeans_density_threshold_inv,
+                                      props->Jeans_gamma_effective);
+
+    pressure = max(pressure, pressure_Jeans);
+  }
+
+  /* Are we in the regime of the Cool equation of state? */
+  if ((rho >= rho_crit_baryon * props->Cool_over_density_threshold) &&
+      (rho >= props->Cool_density_threshold)) {
+
+    const float pressure_Cool = props->Cool_pressure_norm *
+                                powf(rho * props->Cool_density_threshold_inv,
+                                     props->Cool_gamma_effective);
+
+    pressure = max(pressure, pressure_Cool);
+  }
+
+  /* Convert to an entropy.
+   * (Recall that the entropy is the same in co-moving and physical frames) */
+  return gas_entropy_from_pressure(rho, pressure);
+}
+
+/**
+ * @brief Initialise the entropy floor by reading the parameters and converting
+ * to internal units.
+ *
+ * @param params The YAML parameter file.
+ * @param us The system of units used internally.
+ * @param phys_const The physical constants.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param props The entropy floor properties to fill.
+ */
+static INLINE void entropy_floor_init(struct entropy_floor_properties *props,
+                                      const struct phys_const *phys_const,
+                                      const struct unit_system *us,
+                                      const struct hydro_props *hydro_props,
+                                      struct swift_params *params) {
+
+  /* Read the parameters in the units they are set */
+  props->Jeans_density_threshold_H_p_cm3 = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Jeans_density_threshold_H_p_cm3");
+  props->Jeans_over_density_threshold = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Jeans_over_density_threshold");
+  props->Jeans_temperature_norm_K = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Jeans_temperature_norm_K");
+  props->Jeans_gamma_effective =
+      parser_get_param_float(params, "EAGLEEntropyFloor:Jeans_gamma_effective");
+
+  props->Cool_density_threshold_H_p_cm3 = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Cool_density_threshold_H_p_cm3");
+  props->Cool_over_density_threshold = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Cool_over_density_threshold");
+  props->Cool_temperature_norm_K = parser_get_param_float(
+      params, "EAGLEEntropyFloor:Cool_temperature_norm_K");
+  props->Cool_gamma_effective =
+      parser_get_param_float(params, "EAGLEEntropyFloor:Cool_gamma_effective");
+
+  /* Cross-check that the input makes sense */
+  if (props->Cool_density_threshold_H_p_cm3 >=
+      props->Jeans_density_threshold_H_p_cm3) {
+    error(
+        "Invalid values for the entropy floor density thresholds. The 'Jeans' "
+        "threshold (%e cm^-3) should be at a higher density than the 'Cool' "
+        "threshold (%e cm^-3)",
+        props->Jeans_density_threshold_H_p_cm3,
+        props->Cool_density_threshold_H_p_cm3);
+  }
+
+  /* Initial Hydrogen abundance (mass fraction) */
+  const double X_H = hydro_props->hydrogen_mass_fraction;
+
+  /* Now convert to internal units assuming primordial Hydrogen abundance */
+  props->Jeans_temperature_norm =
+      props->Jeans_temperature_norm_K /
+      units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE);
+  props->Jeans_density_threshold =
+      props->Jeans_density_threshold_H_p_cm3 /
+      units_cgs_conversion_factor(us, UNIT_CONV_NUMBER_DENSITY) *
+      phys_const->const_proton_mass / X_H;
+
+  props->Cool_temperature_norm =
+      props->Cool_temperature_norm_K /
+      units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE);
+  props->Cool_density_threshold =
+      props->Cool_density_threshold_H_p_cm3 /
+      units_cgs_conversion_factor(us, UNIT_CONV_NUMBER_DENSITY) *
+      phys_const->const_proton_mass / X_H;
+
+  /* We assume neutral gas */
+  const float mean_molecular_weight = hydro_props->mu_neutral;
+
+  /* Get the common terms */
+  props->Jeans_density_threshold_inv = 1.f / props->Jeans_density_threshold;
+  props->Cool_density_threshold_inv = 1.f / props->Cool_density_threshold;
+
+  /* P_norm = (k_B * T) / (m_p * mu) * rho_threshold */
+  props->Jeans_pressure_norm =
+      ((phys_const->const_boltzmann_k * props->Jeans_temperature_norm) /
+       (phys_const->const_proton_mass * mean_molecular_weight)) *
+      props->Jeans_density_threshold;
+
+  props->Cool_pressure_norm =
+      ((phys_const->const_boltzmann_k * props->Cool_temperature_norm) /
+       (phys_const->const_proton_mass * mean_molecular_weight)) *
+      props->Cool_density_threshold;
+}
+
+/**
+ * @brief Print the properties of the entropy floor to stdout.
+ *
+ * @param props The entropy floor properties.
+ */
+static INLINE void entropy_floor_print(
+    const struct entropy_floor_properties *props) {
+
+  message("Entropy floor is 'EAGLE' with:");
+  message("Jeans limiter with slope n=%.3f at rho=%e (%e H/cm^3) and T=%.1f K",
+          props->Jeans_gamma_effective, props->Jeans_density_threshold,
+          props->Jeans_density_threshold_H_p_cm3,
+          props->Jeans_temperature_norm);
+  message(" Cool limiter with slope n=%.3f at rho=%e (%e H/cm^3) and T=%.1f K",
+          props->Cool_gamma_effective, props->Cool_density_threshold,
+          props->Cool_density_threshold_H_p_cm3, props->Cool_temperature_norm);
+}
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of entropy floor to the file
+ * @param h_grp The HDF5 group in which to write
+ */
+INLINE static void entropy_floor_write_flavour(hid_t h_grp) {
+
+  io_write_attribute_s(h_grp, "Entropy floor", "EAGLE");
+}
+#endif
+
+/**
+ * @brief Write an entropy floor struct to the given FILE as a stream of bytes.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+static INLINE void entropy_floor_struct_dump(
+    const struct entropy_floor_properties *props, FILE *stream) {
+
+  restart_write_blocks((void *)props, sizeof(struct entropy_floor_properties),
+                       1, stream, "entropy floor", "entropy floor properties");
+}
+
+/**
+ * @brief Restore a entropy floor struct from the given FILE as a stream of
+ * bytes.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+static INLINE void entropy_floor_struct_restore(
+    struct entropy_floor_properties *props, FILE *stream) {
+
+  restart_read_blocks((void *)props, sizeof(struct entropy_floor_properties), 1,
+                      stream, NULL, "entropy floor properties");
+}
+
+#endif /* SWIFT_ENTROPY_FLOOR_EAGLE_H */
diff --git a/src/entropy_floor/none/entropy_floor.h b/src/entropy_floor/none/entropy_floor.h
new file mode 100644
index 0000000000000000000000000000000000000000..871ef8977e091841128e280184646e3be02957fd
--- /dev/null
+++ b/src/entropy_floor/none/entropy_floor.h
@@ -0,0 +1,118 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ENTROPY_FLOOR_NONE_H
+#define SWIFT_ENTROPY_FLOOR_NONE_H
+
+/**
+ * @file src/entropy_floor/none/entropy_floor.h
+ * @brief Empty functions used for simulations without entropy
+ * floors.
+ */
+
+struct cosmology;
+struct hydro_props;
+struct part;
+
+/**
+ * @brief Properties of the entropy floor.
+ *
+ * Nothing here.
+ */
+struct entropy_floor_properties {};
+
+/**
+ * @brief Compute the entropy floor of a given #part.
+ *
+ * Simply return 0 (no floor).
+ *
+ * @param p The #part.
+ * @param cosmo The cosmological model.
+ * @param props The properties of the entropy floor.
+ */
+static INLINE float entropy_floor(
+    const struct part *p, const struct cosmology *cosmo,
+    const struct entropy_floor_properties *props) {
+
+  return 0.f;
+}
+
+/**
+ * @brief Initialise the entropy floor by reading the parameters and converting
+ * to internal units.
+ *
+ * Nothing to do here.
+ *
+ * @param params The YAML parameter file.
+ * @param us The system of units used internally.
+ * @param phys_const The physical constants.
+ * @param props The entropy floor properties to fill.
+ */
+static INLINE void entropy_floor_init(struct entropy_floor_properties *props,
+                                      const struct phys_const *phys_const,
+                                      const struct unit_system *us,
+                                      const struct hydro_props *hydro_props,
+                                      struct swift_params *params) {}
+
+/**
+ * @brief Print the properties of the entropy floor to stdout.
+ *
+ * @param props The entropy floor properties.
+ */
+static INLINE void entropy_floor_print(
+    const struct entropy_floor_properties *props) {
+
+  message("Entropy floor is 'no entropy floor'.");
+}
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of entropy floor to the file
+ * @param h_grp The HDF5 group in which to write
+ */
+INLINE static void entropy_floor_write_flavour(hid_t h_grp) {
+
+  io_write_attribute_s(h_grp, "Entropy floor", "None");
+}
+#endif
+
+/**
+ * @brief Write an entropy floor struct to the given FILE as a stream of bytes.
+ *
+ * Nothing to do here.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+static INLINE void entropy_floor_struct_dump(
+    const struct entropy_floor_properties *props, FILE *stream) {}
+
+/**
+ * @brief Restore a entropy floor struct from the given FILE as a stream of
+ * bytes.
+ *
+ * Nothing to do here.
+ *
+ * @param props the struct
+ * @param stream the file stream
+ */
+static INLINE void entropy_floor_struct_restore(
+    struct entropy_floor_properties *props, FILE *stream) {}
+
+#endif /* SWIFT_ENTROPY_FLOOR_NONE_H */
diff --git a/src/equation_of_state/planetary/hm80.h b/src/equation_of_state/planetary/hm80.h
index 5e80c240018756cb57cc8974df4974a6cc53724a..38e2c9e4022387ee5ab79fafbedc6fc0dc47f49d 100644
--- a/src/equation_of_state/planetary/hm80.h
+++ b/src/equation_of_state/planetary/hm80.h
@@ -86,21 +86,19 @@ INLINE static void load_table_HM80(struct HM80_params *mat, char *table_file) {
 
   // Load table contents from file
   FILE *f = fopen(table_file, "r");
-  int c;
+  if (f == NULL) error("Failed to open the HM80 EoS file '%s'", table_file);
 
   // Ignore header lines
   char buffer[100];
   for (int i = 0; i < 4; i++) {
     if (fgets(buffer, 100, f) == NULL)
-      error("Something incorrect happening with the file header.");
+      error("Failed to read the HM80 EoS file header %s", table_file);
   }
 
   // Table properties
-  c = fscanf(f, "%f %f %d %f %f %d", &mat->log_rho_min, &mat->log_rho_max,
-             &mat->num_rho, &mat->log_u_min, &mat->log_u_max, &mat->num_u);
-  if (c != 6) {
-    error("Failed to read EOS table %s", table_file);
-  }
+  int c = fscanf(f, "%f %f %d %f %f %d", &mat->log_rho_min, &mat->log_rho_max,
+                 &mat->num_rho, &mat->log_u_min, &mat->log_u_max, &mat->num_u);
+  if (c != 6) error("Failed to read the HM80 EoS table %s", table_file);
   mat->log_rho_step =
       (mat->log_rho_max - mat->log_rho_min) / (mat->num_rho - 1);
   mat->log_u_step = (mat->log_u_max - mat->log_u_min) / (mat->num_u - 1);
@@ -115,9 +113,7 @@ INLINE static void load_table_HM80(struct HM80_params *mat, char *table_file) {
   for (int i_rho = 0; i_rho < mat->num_rho; i_rho++) {
     for (int i_u = 0; i_u < mat->num_u; i_u++) {
       c = fscanf(f, "%f", &mat->table_log_P_rho_u[i_rho * mat->num_u + i_u]);
-      if (c != 1) {
-        error("Failed to read EOS table");
-      }
+      if (c != 1) error("Failed to read the HM80 EoS table %s", table_file);
     }
   }
   fclose(f);
diff --git a/src/equation_of_state/planetary/sesame.h b/src/equation_of_state/planetary/sesame.h
index d958c9b9d09ffe37eefd77ad0384d85bf8c055dd..11c16964602b28c0d1a080b6c262ff20c1f5b9cb 100644
--- a/src/equation_of_state/planetary/sesame.h
+++ b/src/equation_of_state/planetary/sesame.h
@@ -82,21 +82,19 @@ INLINE static void load_table_SESAME(struct SESAME_params *mat,
 
   // Load table contents from file
   FILE *f = fopen(table_file, "r");
-  int c;
+  if (f == NULL) error("Failed to open the SESAME EoS file '%s'", table_file);
 
   // Ignore header lines
   char buffer[100];
   for (int i = 0; i < 5; i++) {
     if (fgets(buffer, 100, f) == NULL)
-      error("Something incorrect happening with the file header.");
+      error("Failed to read the SESAME EoS file header %s", table_file);
   }
   float ignore;
 
   // Table properties
-  c = fscanf(f, "%d %d", &mat->num_rho, &mat->num_T);
-  if (c != 2) {
-    error("Failed to read EOS table %s", table_file);
-  }
+  int c = fscanf(f, "%d %d", &mat->num_rho, &mat->num_T);
+  if (c != 2) error("Failed to read the SESAME EoS table %s", table_file);
 
   // Ignore the first elements of rho = 0, T = 0
   mat->num_rho--;
@@ -118,23 +116,17 @@ INLINE static void load_table_SESAME(struct SESAME_params *mat,
     // Ignore the first elements of rho = 0, T = 0
     if (i_rho == -1) {
       c = fscanf(f, "%f", &ignore);
-      if (c != 1) {
-        error("Failed to read EOS table %s", table_file);
-      }
+      if (c != 1) error("Failed to read the SESAME EoS table %s", table_file);
     } else {
       c = fscanf(f, "%f", &mat->table_log_rho[i_rho]);
-      if (c != 1) {
-        error("Failed to read EOS table %s", table_file);
-      }
+      if (c != 1) error("Failed to read the SESAME EoS table %s", table_file);
     }
   }
 
   // Temperatures (ignored)
   for (int i_T = -1; i_T < mat->num_T; i_T++) {
     c = fscanf(f, "%f", &ignore);
-    if (c != 1) {
-      error("Failed to read EOS table %s", table_file);
-    }
+    if (c != 1) error("Failed to read the SESAME EoS table %s", table_file);
   }
 
   // Sp. int. energies (not log yet), pressures, sound speeds, and entropies
@@ -143,18 +135,14 @@ INLINE static void load_table_SESAME(struct SESAME_params *mat,
       // Ignore the first elements of rho = 0, T = 0
       if ((i_T == -1) || (i_rho == -1)) {
         c = fscanf(f, "%f %f %f %f", &ignore, &ignore, &ignore, &ignore);
-        if (c != 4) {
-          error("Failed to read EOS table %s", table_file);
-        }
+        if (c != 4) error("Failed to read the SESAME EoS table %s", table_file);
       } else {
         c = fscanf(f, "%f %f %f %f",
                    &mat->table_log_u_rho_T[i_rho * mat->num_T + i_T],
                    &mat->table_P_rho_T[i_rho * mat->num_T + i_T],
                    &mat->table_c_rho_T[i_rho * mat->num_T + i_T],
                    &mat->table_s_rho_T[i_rho * mat->num_T + i_T]);
-        if (c != 4) {
-          error("Failed to read EOS table %s", table_file);
-        }
+        if (c != 4) error("Failed to read the SESAME EoS table %s", table_file);
       }
     }
   }
diff --git a/src/equation_of_state/planetary/tillotson.h b/src/equation_of_state/planetary/tillotson.h
index 1a4210699380b3b0398506dde7fce6ca8055e4dc..609cc8dcb33e6533f68e13430f6654832af4d0a5 100644
--- a/src/equation_of_state/planetary/tillotson.h
+++ b/src/equation_of_state/planetary/tillotson.h
@@ -243,8 +243,8 @@ INLINE static float Til_soundspeed_from_internal_energy(
     P_c = (mat->a + mat->b * w_inv) * density * u + mat->A * mu +
           mat->B * mu * mu;
   }
-  c_sq_c = P_c * rho_inv * (1.f - mat->a - mat->b * w_inv) +
-           mat->b * (w - 1.f) * w_inv_sq * (2 * u + P_c * rho_inv) +
+  c_sq_c = P_c * rho_inv * (1.f + mat->a + mat->b * w_inv) +
+           mat->b * (w - 1.f) * w_inv_sq * (2.f * u - P_c * rho_inv) +
            rho_inv * (mat->A + mat->B * (eta_sq - 1.f));
 
   c_sq_c = fmax(c_sq_c, mat->A * rho_0_inv);
@@ -253,14 +253,15 @@ INLINE static float Til_soundspeed_from_internal_energy(
   P_e = mat->a * density * u +
         (mat->b * density * u * w_inv + mat->A * mu * exp_beta) * exp_alpha;
 
-  c_sq_e = P_e * rho_inv * (1.f - mat->a) +
-           (mat->b * density * u / (w * w * eta_sq) *
-                (rho_inv / mat->u_0 * (2 * u - P_e * rho_inv * eta_sq) +
-                 2.f * mat->alpha * nu * rho_0_inv) +
-            mat->A * rho_0_inv *
-                (1 + mu / eta_sq * (mat->beta + 2.f * mat->alpha * nu - eta)) *
-                exp_beta) *
-               exp_alpha;
+  c_sq_e =
+      P_e * rho_inv * (1.f + mat->a + mat->b * w_inv * exp_alpha) +
+      (mat->b * density * u * w_inv_sq / eta_sq *
+           (rho_inv / mat->u_0 * (2.f * u - P_e * rho_inv) +
+            2.f * mat->alpha * nu * w * rho_0_inv) +
+       mat->A * rho_0_inv *
+           (1.f + mu / eta_sq * (mat->beta + 2.f * mat->alpha * nu - eta)) *
+           exp_beta) *
+          exp_alpha;
 
   // Condensed or cold state
   if ((1.f < eta) || (u < mat->u_iv)) {
diff --git a/src/exp10.h b/src/exp10.h
new file mode 100644
index 0000000000000000000000000000000000000000..b995bfdb3e1b6b1cb60bd4b60708413ea6c96f9f
--- /dev/null
+++ b/src/exp10.h
@@ -0,0 +1,63 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EXP10_H
+#define SWIFT_EXP10_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <math.h>
+
+#ifndef __GNUC__
+
+/* Local headers. */
+#include "inline.h"
+
+/**
+ * @brief Raises 10 to the power of the argument.
+ *
+ * This function is only used as a replacement for compilers that do
+ * not implement GNU extensions to the C language.
+ *
+ * @param x The input value.
+ */
+__attribute__((always_inline, const)) INLINE static double exp10(
+    const double x) {
+
+  return exp(x * M_LN10);
+}
+
+/**
+ * @brief Raises 10 to the power of the argument.
+ *
+ * This function is only used as a replacement for compilers that do
+ * not implement GNU extensions to the C language.
+ *
+ * @param x The input value.
+ */
+__attribute__((always_inline, const)) INLINE static float exp10f(
+    const float x) {
+
+  return expf(x * (float)M_LN10);
+}
+
+#endif /* __GNUC__ */
+
+#endif /* SWIFT_EXP10_H */
diff --git a/src/gravity.c b/src/gravity.c
index 1f88490b57d944fc69e7b2e07dcad39294dba732..53ab6b816f964e4e2b071df1e3192d972c5567de 100644
--- a/src/gravity.c
+++ b/src/gravity.c
@@ -487,7 +487,7 @@ void gravity_exact_force_compute_mapper(void *map_data, int nr_gparts,
     long long id = 0;
     if (gpi->type == swift_type_gas)
       id = parts[-gpi->id_or_neg_offset].id;
-    else if (gpi->type == swift_type_star)
+    else if (gpi->type == swift_type_stars)
       id = sparts[-gpi->id_or_neg_offset].id;
     else if (gpi->type == swift_type_black_hole)
       error("Unexisting type");
@@ -676,7 +676,7 @@ void gravity_exact_force_check(struct space *s, const struct engine *e,
     long long id = 0;
     if (gpi->type == swift_type_gas)
       id = parts[-gpi->id_or_neg_offset].id;
-    else if (gpi->type == swift_type_star)
+    else if (gpi->type == swift_type_stars)
       id = sparts[-gpi->id_or_neg_offset].id;
     else if (gpi->type == swift_type_black_hole)
       error("Unexisting type");
@@ -730,7 +730,7 @@ void gravity_exact_force_check(struct space *s, const struct engine *e,
       long long id = 0;
       if (gpi->type == swift_type_gas)
         id = parts[-gpi->id_or_neg_offset].id;
-      else if (gpi->type == swift_type_star)
+      else if (gpi->type == swift_type_stars)
         id = sparts[-gpi->id_or_neg_offset].id;
       else if (gpi->type == swift_type_black_hole)
         error("Unexisting type");
diff --git a/src/gravity/Default/gravity.h b/src/gravity/Default/gravity.h
index 2713c9ee7affca4f06b369d038916f76b8c2ee48..6d1270c952100f3a25202fcdb22be09f9acaa8d9 100644
--- a/src/gravity/Default/gravity.h
+++ b/src/gravity/Default/gravity.h
@@ -22,6 +22,7 @@
 
 #include <float.h>
 
+/* Local includes. */
 #include "cosmology.h"
 #include "gravity_properties.h"
 #include "kernel_gravity.h"
@@ -39,7 +40,7 @@ __attribute__((always_inline)) INLINE static float gravity_get_mass(
 }
 
 /**
- * @brief Returns the softening of a particle
+ * @brief Returns the current co-moving softening of a particle
  *
  * @param gp The particle of interest
  * @param grav_props The global gravity properties.
@@ -155,6 +156,7 @@ __attribute__((always_inline)) INLINE static void gravity_init_gpart(
 
 #ifdef SWIFT_DEBUG_CHECKS
   gp->num_interacted = 0;
+  gp->initialised = 1;
 #endif
 }
 
@@ -187,6 +189,10 @@ __attribute__((always_inline)) INLINE static void gravity_end_force(
   gp->a_grav_PM[1] *= const_G;
   gp->a_grav_PM[2] *= const_G;
 #endif
+
+#ifdef SWIFT_DEBUG_CHECKS
+  gp->initialised = 0; /* Ready for next step */
+#endif
 }
 
 /**
diff --git a/src/gravity/Default/gravity_iact.h b/src/gravity/Default/gravity_iact.h
index 71e5007a49bda25a8b65d4a5d3733d0027aa2682..6fce3ddd512018e9ea4be21111c75904c77cb925 100644
--- a/src/gravity/Default/gravity_iact.h
+++ b/src/gravity/Default/gravity_iact.h
@@ -166,7 +166,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_grav_pm_full(
 
   /* Compute the derivatives of the potential */
   struct potential_derivatives_M2P d;
-  compute_potential_derivatives_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 0, 0.f,
+  potential_derivatives_compute_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 0, 0.f,
                                     &d);
 
   /* 0th order contributions */
@@ -271,7 +271,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_grav_pm_truncated(
 
   /* Compute the derivatives of the potential */
   struct potential_derivatives_M2P d;
-  compute_potential_derivatives_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 1,
+  potential_derivatives_compute_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 1,
                                     r_s_inv, &d);
 
   /* 0th order contributions */
diff --git a/src/gravity/Default/gravity_part.h b/src/gravity/Default/gravity_part.h
index bd73c56da82877415f5abc9edf41ede1c551f16f..f065e6d3a2994ff1e522fc3ae9a38fcf591d92af 100644
--- a/src/gravity/Default/gravity_part.h
+++ b/src/gravity/Default/gravity_part.h
@@ -55,6 +55,9 @@ struct gpart {
   /* Time of the last kick */
   integertime_t ti_kick;
 
+  /* Has this particle been initialised? */
+  int initialised;
+
 #endif
 
 #ifdef SWIFT_GRAVITY_FORCE_CHECKS
diff --git a/src/gravity/Potential/gravity.h b/src/gravity/Potential/gravity.h
index 3a6c0fba18856b57911d49bcee6915f5003e2e68..7d38e9126f1a313b169092b080ebc312c4bbe1bc 100644
--- a/src/gravity/Potential/gravity.h
+++ b/src/gravity/Potential/gravity.h
@@ -39,7 +39,7 @@ __attribute__((always_inline)) INLINE static float gravity_get_mass(
 }
 
 /**
- * @brief Returns the softening of a particle
+ * @brief Returns the current co-moving softening of a particle
  *
  * @param gp The particle of interest
  * @param grav_props The global gravity properties.
@@ -151,6 +151,7 @@ __attribute__((always_inline)) INLINE static void gravity_init_gpart(
 
 #ifdef SWIFT_DEBUG_CHECKS
   gp->num_interacted = 0;
+  gp->initialised = 1;
 #endif
 }
 
@@ -183,6 +184,10 @@ __attribute__((always_inline)) INLINE static void gravity_end_force(
   gp->a_grav_PM[1] *= const_G;
   gp->a_grav_PM[2] *= const_G;
 #endif
+
+#ifdef SWIFT_DEBUG_CHECKS
+  gp->initialised = 0; /* Ready for next step */
+#endif
 }
 
 /**
diff --git a/src/gravity/Potential/gravity_iact.h b/src/gravity/Potential/gravity_iact.h
index fdc8c17da1576b85026c3e551dd70d27bc186612..f2094f6ecd5b31b94ebfe7a64f42fbd289a0c81c 100644
--- a/src/gravity/Potential/gravity_iact.h
+++ b/src/gravity/Potential/gravity_iact.h
@@ -169,7 +169,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_grav_pm_full(
 
   /* Compute the derivatives of the potential */
   struct potential_derivatives_M2P d;
-  compute_potential_derivatives_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 0, 0.f,
+  potential_derivatives_compute_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 0, 0.f,
                                     &d);
 
   /* 0th order contributions */
@@ -281,7 +281,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_grav_pm_truncated(
 
   /* Compute the derivatives of the potential */
   struct potential_derivatives_M2P d;
-  compute_potential_derivatives_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 1,
+  potential_derivatives_compute_M2P(r_x, r_y, r_z, r2, r_inv, h, h_inv, 1,
                                     r_s_inv, &d);
 
   /* 0th order contributions */
diff --git a/src/gravity/Potential/gravity_part.h b/src/gravity/Potential/gravity_part.h
index 252c18a4dc63c9cea4211ed8ab23eb692f064f00..229d8011088d4a4a70ff9f287597a0ba463ca951 100644
--- a/src/gravity/Potential/gravity_part.h
+++ b/src/gravity/Potential/gravity_part.h
@@ -58,6 +58,9 @@ struct gpart {
   /* Time of the last kick */
   integertime_t ti_kick;
 
+  /* Has this particle been initialised? */
+  int initialised;
+
 #endif
 
 #ifdef SWIFT_GRAVITY_FORCE_CHECKS
diff --git a/src/gravity_cache.h b/src/gravity_cache.h
index 821f044429b445c28ff8ae39b8dc65304dd2b42d..6453d1eb92814f0e20cf25fa5996b920e523812d 100644
--- a/src/gravity_cache.h
+++ b/src/gravity_cache.h
@@ -208,12 +208,20 @@ __attribute__((always_inline)) INLINE static void gravity_cache_populate(
 
   /* Fill the input caches */
   for (int i = 0; i < gcount; ++i) {
+
     x[i] = (float)(gparts[i].x[0] - shift[0]);
     y[i] = (float)(gparts[i].x[1] - shift[1]);
     z[i] = (float)(gparts[i].x[2] - shift[2]);
     epsilon[i] = gravity_get_softening(&gparts[i], grav_props);
-    m[i] = gparts[i].mass;
-    active[i] = (int)(gparts[i].time_bin <= max_active_bin);
+
+    /* Make a dummy particle out of the inhibited ones */
+    if (gparts[i].time_bin == time_bin_inhibited) {
+      m[i] = 0.f;
+      active[i] = 0;
+    } else {
+      m[i] = gparts[i].mass;
+      active[i] = (int)(gparts[i].time_bin <= max_active_bin);
+    }
 
     /* Distance to the CoM of the other cell. */
     float dx = x[i] - CoM[0];
@@ -294,8 +302,15 @@ gravity_cache_populate_no_mpole(const timebin_t max_active_bin,
     y[i] = (float)(gparts[i].x[1] - shift[1]);
     z[i] = (float)(gparts[i].x[2] - shift[2]);
     epsilon[i] = gravity_get_softening(&gparts[i], grav_props);
-    m[i] = gparts[i].mass;
-    active[i] = (int)(gparts[i].time_bin <= max_active_bin);
+
+    /* Make a dummy particle out of the inhibited ones */
+    if (gparts[i].time_bin == time_bin_inhibited) {
+      m[i] = 0.f;
+      active[i] = 0;
+    } else {
+      m[i] = gparts[i].mass;
+      active[i] = (int)(gparts[i].time_bin <= max_active_bin);
+    }
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
diff --git a/src/gravity_derivatives.h b/src/gravity_derivatives.h
index 756fb7af66d4cb695ba014452e424843b1c7c25b..3dcffe1cc04c5e10d3b2353b7e21c532747c1475 100644
--- a/src/gravity_derivatives.h
+++ b/src/gravity_derivatives.h
@@ -125,6 +125,65 @@ struct potential_derivatives_M2P {
 #endif
 };
 
+/**
+ * @brief Converts the derivatives from a distance vector to its opposite.
+ *
+ * From a series of tensors D_xxx(r), compute D_xxx(-r).
+ * This can be computed efficiently by flipping the sign of all the odd
+ * derivative terms.
+ *
+ * @param pot The derivatives of the potential.
+ */
+__attribute__((always_inline)) INLINE static void
+potential_derivatives_flip_signs(struct potential_derivatives_M2L *pot) {
+
+#if SELF_GRAVITY_MULTIPOLE_ORDER > 0
+  /* 1st order terms */
+  pot->D_100 = -pot->D_100;
+  pot->D_010 = -pot->D_010;
+  pot->D_001 = -pot->D_001;
+#endif
+
+#if SELF_GRAVITY_MULTIPOLE_ORDER > 2
+  /* 3rd order terms */
+  pot->D_300 = -pot->D_300;
+  pot->D_030 = -pot->D_030;
+  pot->D_003 = -pot->D_003;
+  pot->D_210 = -pot->D_210;
+  pot->D_201 = -pot->D_201;
+  pot->D_021 = -pot->D_021;
+  pot->D_120 = -pot->D_120;
+  pot->D_012 = -pot->D_012;
+  pot->D_102 = -pot->D_102;
+  pot->D_111 = -pot->D_111;
+#endif
+
+#if SELF_GRAVITY_MULTIPOLE_ORDER > 4
+  /* 5th order terms */
+  pot->D_500 = -pot->D_500;
+  pot->D_050 = -pot->D_050;
+  pot->D_005 = -pot->D_005;
+  pot->D_410 = -pot->D_410;
+  pot->D_401 = -pot->D_401;
+  pot->D_041 = -pot->D_041;
+  pot->D_140 = -pot->D_140;
+  pot->D_014 = -pot->D_014;
+  pot->D_104 = -pot->D_104;
+  pot->D_320 = -pot->D_320;
+  pot->D_302 = -pot->D_302;
+  pot->D_032 = -pot->D_032;
+  pot->D_230 = -pot->D_230;
+  pot->D_023 = -pot->D_023;
+  pot->D_203 = -pot->D_203;
+  pot->D_311 = -pot->D_311;
+  pot->D_131 = -pot->D_131;
+  pot->D_113 = -pot->D_113;
+  pot->D_122 = -pot->D_122;
+  pot->D_212 = -pot->D_212;
+  pot->D_221 = -pot->D_221;
+#endif
+}
+
 /**
  * @brief Compute all the relevent derivatives of the softened and truncated
  * gravitational potential for the M2L kernel.
@@ -141,7 +200,7 @@ struct potential_derivatives_M2P {
  * @param pot (return) The structure containing all the derivatives.
  */
 __attribute__((always_inline)) INLINE static void
-compute_potential_derivatives_M2L(const float r_x, const float r_y,
+potential_derivatives_compute_M2L(const float r_x, const float r_y,
                                   const float r_z, const float r2,
                                   const float r_inv, const float eps,
                                   const float eps_inv, const int periodic,
@@ -397,7 +456,7 @@ compute_potential_derivatives_M2L(const float r_x, const float r_y,
  * @param pot (return) The structure containing all the derivatives.
  */
 __attribute__((always_inline)) INLINE static void
-compute_potential_derivatives_M2P(const float r_x, const float r_y,
+potential_derivatives_compute_M2P(const float r_x, const float r_y,
                                   const float r_z, const float r2,
                                   const float r_inv, const float eps,
                                   const float eps_inv, const int periodic,
diff --git a/src/gravity_properties.c b/src/gravity_properties.c
index fc1ce1d62e02c32d44667d602448fc4eb3a65344..60c0bd05ba8cb2234284154b4383bec07f30756d 100644
--- a/src/gravity_properties.c
+++ b/src/gravity_properties.c
@@ -39,7 +39,8 @@
 #define gravity_props_default_rebuild_frequency 0.01f
 
 void gravity_props_init(struct gravity_props *p, struct swift_params *params,
-                        const struct cosmology *cosmo, int with_cosmology) {
+                        const struct cosmology *cosmo, int with_cosmology,
+                        int periodic) {
 
   /* Tree updates */
   p->rebuild_frequency =
@@ -50,19 +51,31 @@ void gravity_props_init(struct gravity_props *p, struct swift_params *params,
     error("Invalid tree rebuild frequency. Must be in [0., 1.]");
 
   /* Tree-PM parameters */
-  p->mesh_size = parser_get_param_int(params, "Gravity:mesh_side_length");
-  p->a_smooth = parser_get_opt_param_float(params, "Gravity:a_smooth",
-                                           gravity_props_default_a_smooth);
-  p->r_cut_max_ratio = parser_get_opt_param_float(
-      params, "Gravity:r_cut_max", gravity_props_default_r_cut_max);
-  p->r_cut_min_ratio = parser_get_opt_param_float(
-      params, "Gravity:r_cut_min", gravity_props_default_r_cut_min);
-
-  if (p->mesh_size % 2 != 0)
-    error("The mesh side-length must be an even number.");
-
-  if (p->a_smooth <= 0.)
-    error("The mesh smoothing scale 'a_smooth' must be > 0.");
+  if (periodic) {
+    p->mesh_size = parser_get_param_int(params, "Gravity:mesh_side_length");
+    p->a_smooth = parser_get_opt_param_float(params, "Gravity:a_smooth",
+                                             gravity_props_default_a_smooth);
+    p->r_cut_max_ratio = parser_get_opt_param_float(
+        params, "Gravity:r_cut_max", gravity_props_default_r_cut_max);
+    p->r_cut_min_ratio = parser_get_opt_param_float(
+        params, "Gravity:r_cut_min", gravity_props_default_r_cut_min);
+
+    /* Some basic checks of what we read */
+    if (p->mesh_size % 2 != 0)
+      error("The mesh side-length must be an even number.");
+
+    if (p->a_smooth <= 0.)
+      error("The mesh smoothing scale 'a_smooth' must be > 0.");
+
+    if (2. * p->a_smooth * p->r_cut_max_ratio > p->mesh_size)
+      error("Mesh too small given r_cut_max. Should be at least %d cells wide.",
+            (int)(2. * p->a_smooth * p->r_cut_max_ratio) + 1);
+  } else {
+    p->mesh_size = 0;
+    p->a_smooth = 0.f;
+    p->r_cut_min_ratio = 0.f;
+    p->r_cut_max_ratio = 0.f;
+  }
 
   /* Time integration */
   p->eta = parser_get_param_float(params, "Gravity:eta");
@@ -86,10 +99,11 @@ void gravity_props_init(struct gravity_props *p, struct swift_params *params,
   }
 
   /* Set the softening to the current time */
-  gravity_update(p, cosmo);
+  gravity_props_update(p, cosmo);
 }
 
-void gravity_update(struct gravity_props *p, const struct cosmology *cosmo) {
+void gravity_props_update(struct gravity_props *p,
+                          const struct cosmology *cosmo) {
 
   /* Current softening lengths */
   double softening;
@@ -157,20 +171,22 @@ void gravity_props_print_snapshot(hid_t h_grpgrav,
   io_write_attribute_s(h_grpgrav, "Softening style",
                        kernel_gravity_softening_name);
   io_write_attribute_f(
-      h_grpgrav, "Comoving softening length",
+      h_grpgrav, "Comoving softening length [internal units]",
       p->epsilon_comoving * kernel_gravity_softening_plummer_equivalent);
-  io_write_attribute_f(h_grpgrav,
-                       "Comoving Softening length (Plummer equivalent)",
-                       p->epsilon_comoving);
   io_write_attribute_f(
-      h_grpgrav, "Maximal physical softening length",
+      h_grpgrav,
+      "Comoving Softening length (Plummer equivalent)  [internal units]",
+      p->epsilon_comoving);
+  io_write_attribute_f(
+      h_grpgrav, "Maximal physical softening length  [internal units]",
       p->epsilon_max_physical * kernel_gravity_softening_plummer_equivalent);
   io_write_attribute_f(h_grpgrav,
-                       "Maximal physical softening length (Plummer equivalent)",
+                       "Maximal physical softening length (Plummer equivalent) "
+                       " [internal units]",
                        p->epsilon_max_physical);
   io_write_attribute_f(h_grpgrav, "Opening angle", p->theta_crit);
   io_write_attribute_s(h_grpgrav, "Scheme", GRAVITY_IMPLEMENTATION);
-  io_write_attribute_d(h_grpgrav, "MM order", SELF_GRAVITY_MULTIPOLE_ORDER);
+  io_write_attribute_i(h_grpgrav, "MM order", SELF_GRAVITY_MULTIPOLE_ORDER);
   io_write_attribute_f(h_grpgrav, "Mesh a_smooth", p->a_smooth);
   io_write_attribute_f(h_grpgrav, "Mesh r_cut_max ratio", p->r_cut_max_ratio);
   io_write_attribute_f(h_grpgrav, "Mesh r_cut_min ratio", p->r_cut_min_ratio);
diff --git a/src/gravity_properties.h b/src/gravity_properties.h
index 62dbab3605fb2dcfc4ae65e54c0b5f913d714c16..09c8ef8ffa1d6cc4effa4895614106217a2861a9 100644
--- a/src/gravity_properties.h
+++ b/src/gravity_properties.h
@@ -73,7 +73,7 @@ struct gravity_props {
   /*! Maxium physical softening */
   double epsilon_max_physical;
 
-  /*! Current sftening length */
+  /*! Current softening length */
   float epsilon_cur;
 
   /*! Square of current softening length */
@@ -88,8 +88,10 @@ struct gravity_props {
 
 void gravity_props_print(const struct gravity_props *p);
 void gravity_props_init(struct gravity_props *p, struct swift_params *params,
-                        const struct cosmology *cosmo, int with_cosmology);
-void gravity_update(struct gravity_props *p, const struct cosmology *cosmo);
+                        const struct cosmology *cosmo, int with_cosmology,
+                        int periodic);
+void gravity_props_update(struct gravity_props *p,
+                          const struct cosmology *cosmo);
 
 #if defined(HAVE_HDF5)
 void gravity_props_print_snapshot(hid_t h_grpsph,
diff --git a/src/hydro.h b/src/hydro.h
index b3716996cc4da68f9445adccd12315b32d81a34c..3bf7d2228b528796a8717d6a5ab17fda6f569d25 100644
--- a/src/hydro.h
+++ b/src/hydro.h
@@ -45,6 +45,12 @@
 #include "./hydro/PressureEnergy/hydro.h"
 #include "./hydro/PressureEnergy/hydro_iact.h"
 #define SPH_IMPLEMENTATION "Pressure-Energy SPH (Hopkins 2013)"
+#elif defined(HOPKINS_PU_SPH_MONAGHAN)
+#include "./hydro/PressureEnergyMorrisMonaghanAV/hydro.h"
+#include "./hydro/PressureEnergyMorrisMonaghanAV/hydro_iact.h"
+#define SPH_IMPLEMENTATION                                                \
+  "Pressure-Energy SPH (Hopkins 2013) with a Morris and Monaghan (1997) " \
+  "variable artificial viscosity."
 #elif defined(DEFAULT_SPH)
 #include "./hydro/Default/hydro.h"
 #include "./hydro/Default/hydro_iact.h"
@@ -66,6 +72,11 @@
 #include "./hydro/Planetary/hydro.h"
 #include "./hydro/Planetary/hydro_iact.h"
 #define SPH_IMPLEMENTATION "Minimal version of SPH with multiple materials"
+#elif defined(ANARCHY_PU_SPH)
+#include "./hydro/AnarchyPU/hydro.h"
+#include "./hydro/AnarchyPU/hydro_iact.h"
+#define SPH_IMPLEMENTATION \
+  "ANARCHY (Pressure-Energy) SPH (Dalla Vecchia+ in prep)"
 #else
 #error "Invalid choice of SPH variant"
 #endif
diff --git a/src/hydro/AnarchyPU/hydro.h b/src/hydro/AnarchyPU/hydro.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bb53f290acb16b4f9efc44e430a78a6d1f5c5ff
--- /dev/null
+++ b/src/hydro/AnarchyPU/hydro.h
@@ -0,0 +1,947 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ANARCHY_PU_HYDRO_H
+#define SWIFT_ANARCHY_PU_HYDRO_H
+
+/**
+ * @file AnarchyPU/hydro.h
+ * @brief P-U conservative implementation of SPH (Non-neighbour loop
+ * equations)
+ *
+ * The thermal variable is the internal energy (u). A variable artificial
+ * viscosity term (with a Balsara switch) and a variable thermal diffusion
+ * term are implemented; their coefficients evolve in time following the
+ * source terms computed in hydro_prepare_force().
+ *
+ * This implementation corresponds to the one presented in the SWIFT
+ * documentation and in Hopkins, "A general class of Lagrangian smoothed
+ * particle hydrodynamics methods and implications for fluid mixing problems",
+ * MNRAS, 2013.
+ */
+
+#include "adiabatic_index.h"
+#include "approx_math.h"
+#include "cosmology.h"
+#include "dimension.h"
+#include "equation_of_state.h"
+#include "hydro_properties.h"
+#include "hydro_space.h"
+#include "kernel_hydro.h"
+#include "minmax.h"
+
+#include <float.h>
+
+/**
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not internal energy, this function computes the internal
+ * energy from the thermodynamic variable.
+ *
+ * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
+
+  return xp->u_full;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not internal energy, this function computes the internal
+ * energy from the thermodynamic variable and converts it to
+ * physical coordinates.
+ *
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
+                                   const struct cosmology *cosmo) {
+
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
+  return p->u * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving pressure of a particle
+ *
+ * Computes the pressure based on the particle's properties.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_pressure(
+    const struct part *restrict p) {
+
+  return p->pressure_bar;
+}
+
+/**
+ * @brief Returns the physical pressure of a particle
+ *
+ * Computes the pressure based on the particle's properties and
+ * convert it to physical coordinates.
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
+    const struct part *restrict p, const struct cosmology *cosmo) {
+
+  return cosmo->a_factor_pressure * p->pressure_bar;
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not entropy, this function computes the entropy from
+ * the thermodynamic variable.
+ *
+ * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
+    const struct part *restrict p, const struct xpart *restrict xp) {
+
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not entropy, this function computes the entropy from
+ * the thermodynamic variable and converts it to
+ * physical coordinates.
+ *
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the comoving sound speed of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_soundspeed(const struct part *restrict p) {
+
+  /* Compute the sound speed -- see theory section for justification */
+  /* IDEAL GAS ONLY -- P-U does not work with generic EoS. */
+  const float square_rooted = sqrtf(hydro_gamma * p->pressure_bar / p->rho);
+
+  return square_rooted;
+}
+
+/**
+ * @brief Returns the physical sound speed of a particle
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_soundspeed(const struct part *restrict p,
+                              const struct cosmology *cosmo) {
+
+  return cosmo->a_factor_sound_speed * p->force.soundspeed;
+}
+
+/**
+ * @brief Returns the comoving density of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_density(
+    const struct part *restrict p) {
+
+  return p->rho;
+}
+
+/**
+ * @brief Returns the comoving density of a particle.
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_density(
+    const struct part *restrict p, const struct cosmology *cosmo) {
+
+  return cosmo->a3_inv * p->rho;
+}
+
+/**
+ * @brief Returns the mass of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_mass(
+    const struct part *restrict p) {
+
+  return p->mass;
+}
+
+/**
+ * @brief Sets the mass of a particle
+ *
+ * @param p The particle of interest
+ * @param m The mass to set.
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_mass(
+    struct part *restrict p, float m) {
+
+  p->mass = m;
+}
+
+/**
+ * @brief Returns the velocities drifted to the current time of a particle.
+ *
+ * @param p The particle of interest
+ * @param xp The extended data of the particle.
+ * @param dt_kick_hydro The time (for hydro accelerations) since the last kick.
+ * @param dt_kick_grav The time (for gravity accelerations) since the last kick.
+ * @param v (return) The velocities at the current time.
+ */
+__attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
+    const struct part *restrict p, const struct xpart *xp, float dt_kick_hydro,
+    float dt_kick_grav, float v[3]) {
+
+  v[0] = xp->v_full[0] + p->a_hydro[0] * dt_kick_hydro +
+         xp->a_grav[0] * dt_kick_grav;
+  v[1] = xp->v_full[1] + p->a_hydro[1] * dt_kick_hydro +
+         xp->a_grav[1] * dt_kick_grav;
+  v[2] = xp->v_full[2] + p->a_hydro[2] * dt_kick_hydro +
+         xp->a_grav[2] * dt_kick_grav;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
+
+  return p->u_dt;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->u_dt * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
+
+  p->u_dt = du_dt;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->u_dt = du_dt / cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  const float comoving_entropy = entropy;
+  xp->u_full = gas_internal_energy_from_entropy(p->rho, comoving_entropy);
+}
+
+/**
+ * @brief Computes the hydro time-step of a given particle
+ *
+ * This function returns the time-step of a particle given its hydro-dynamical
+ * state. A typical time-step calculation would be the use of the CFL condition.
+ *
+ * @param p Pointer to the particle data
+ * @param xp Pointer to the extended particle data
+ * @param hydro_properties The SPH parameters
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_compute_timestep(
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct hydro_props *restrict hydro_properties,
+    const struct cosmology *restrict cosmo) {
+
+  const float CFL_condition = hydro_properties->CFL_condition;
+
+  /* CFL condition */
+  const float dt_cfl = 2.f * kernel_gamma * CFL_condition * cosmo->a * p->h /
+                       (cosmo->a_factor_sound_speed * p->viscosity.v_sig);
+
+  const float dt_u_change =
+      (p->u_dt != 0.0f) ? fabsf(const_max_u_change * p->u / p->u_dt) : FLT_MAX;
+
+  return fminf(dt_cfl, dt_u_change);
+}
+
+/**
+ * @brief Does some extra hydro operations once the actual physical time step
+ * for the particle is known.
+ *
+ * @param p The particle to act upon.
+ * @param dt Physical time step of the particle during the next step.
+ */
+__attribute__((always_inline)) INLINE static void hydro_timestep_extra(
+    struct part *p, float dt) {}
+
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
+/**
+ * @brief Prepares a particle for the density calculation.
+ *
+ * Zeroes all the relevant arrays in preparation for the sums taking place in
+ * the various density loop over neighbours. Typically, all fields of the
+ * density sub-structure of a particle get zeroed in here.
+ *
+ * @param p The particle to act upon
+ * @param hs #hydro_space containing hydro specific space information.
+ */
+__attribute__((always_inline)) INLINE static void hydro_init_part(
+    struct part *restrict p, const struct hydro_space *hs) {
+
+  p->density.wcount = 0.f;
+  p->density.wcount_dh = 0.f;
+  p->rho = 0.f;
+  p->density.rho_dh = 0.f;
+  p->pressure_bar = 0.f;
+  p->density.pressure_bar_dh = 0.f;
+
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
+
+  p->viscosity.div_v = 0.f;
+  p->diffusion.laplace_u = 0.f;
+}
+
+/**
+ * @brief Finishes the density calculation.
+ *
+ * Multiplies the density and number of neighbours by the appropriate constants
+ * and add the self-contribution term.
+ * Additional quantities such as velocity gradients will also get the final
+ * terms added to them here.
+ *
+ * Also adds/multiplies the cosmological terms if need be.
+ *
+ * @param p The particle to act upon
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_end_density(
+    struct part *restrict p, const struct cosmology *cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv);       /* 1/h^d */
+  const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */
+
+  /* Final operation on the density (add self-contribution). */
+  p->rho += p->mass * kernel_root;
+  p->density.rho_dh -= hydro_dimension * p->mass * kernel_root;
+  p->pressure_bar += p->mass * p->u * kernel_root;
+  p->density.pressure_bar_dh -= hydro_dimension * p->mass * p->u * kernel_root;
+  p->density.wcount += kernel_root;
+  p->density.wcount_dh -= hydro_dimension * kernel_root;
+
+  /* Finish the calculation by inserting the missing h-factors */
+  p->rho *= h_inv_dim;
+  p->density.rho_dh *= h_inv_dim_plus_one;
+  p->pressure_bar *= (h_inv_dim * hydro_gamma_minus_one);
+  p->density.pressure_bar_dh *= (h_inv_dim_plus_one * hydro_gamma_minus_one);
+  p->density.wcount *= h_inv_dim;
+  p->density.wcount_dh *= h_inv_dim_plus_one;
+
+  const float rho_inv = 1.f / p->rho;
+  const float a_inv2 = cosmo->a2_inv;
+
+  /* Finish calculation of the velocity curl components */
+  p->density.rot_v[0] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[1] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[2] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+
+  /* Finish calculation of the velocity divergence and add the Hubble flow */
+  p->viscosity.div_v = p->viscosity.div_v * h_inv_dim_plus_one * rho_inv *
+                           a_inv2 + cosmo->H * hydro_dimension;
+}
+
+/**
+ * @brief Prepare a particle for the gradient calculation.
+ *
+ * This function is called after the density loop and before the gradient loop.
+ *
+ * We use it to compute the sound speed, the Balsara switch and the
+ * "grad h" correction term from the quantities accumulated during the
+ * density loop, and to store them in the force sub-structure of the
+ * particle.
+ *
+ * @param p The particle to act upon.
+ * @param xp The extended particle data to act upon.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_prepare_gradient(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  const float fac_B = cosmo->a_factor_Balsara_eps;
+
+  /* Compute the norm of the curl */
+  const float curl_v = sqrtf(p->density.rot_v[0] * p->density.rot_v[0] +
+                             p->density.rot_v[1] * p->density.rot_v[1] +
+                             p->density.rot_v[2] * p->density.rot_v[2]);
+
+  /* Compute the norm of div v */
+  const float abs_div_v = fabsf(p->viscosity.div_v);
+
+  /* Compute the sound speed -- see theory section for justification */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  /* Compute the Balsara switch */
+  const float balsara =
+      abs_div_v / (abs_div_v + curl_v + 0.0001f * soundspeed * fac_B / p->h);
+
+  /* Compute the "grad h" term */
+  const float common_factor = p->h / (hydro_dimension * p->density.wcount);
+  const float grad_h_term = (p->density.pressure_bar_dh * common_factor *
+                             hydro_one_over_gamma_minus_one) /
+                            (1.f + common_factor * p->density.wcount_dh);
+
+  /* Update variables. */
+  p->force.f = grad_h_term;
+  p->force.soundspeed = soundspeed;
+  p->force.balsara = balsara;
+}
+
+/**
+ * @brief Resets the variables that are required for a gradient calculation.
+ *
+ * This function is called after hydro_prepare_gradient. It initialises
+ * the maximal signal velocity to twice the sound speed computed there.
+ * (Only the particle itself is needed here.)
+ *
+ * @param p The particle to act upon.
+ */
+__attribute__((always_inline)) INLINE static void hydro_reset_gradient(
+    struct part *restrict p) {
+  p->viscosity.v_sig = 2.f * p->force.soundspeed;
+}
+
+/**
+ * @brief Finishes the gradient calculation.
+ *
+ * Inserts the missing h-factors into the Laplacian of the internal
+ * energy (del^2 u) that was accumulated during the gradient loop over
+ * the neighbours.
+ *
+ *
+ * @param p The particle to act upon.
+ */
+__attribute__((always_inline)) INLINE static void hydro_end_gradient(
+    struct part *p) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv);       /* 1/h^d */
+  const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */
+
+  /* Include the extra factors in the del^2 u */
+
+  p->diffusion.laplace_u *= 2 * h_inv_dim_plus_one;
+}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
+ *
+ * In the desperate case where a particle has no neighbours (likely because
+ * of the h_max ceiling), set the particle fields to something sensible to avoid
+ * NaNs in the next calculations.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                 /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv); /* 1/h^d */
+
+  /* Re-set problematic values */
+  p->rho = p->mass * kernel_root * h_inv_dim;
+  p->viscosity.v_sig = 0.f;
+  p->pressure_bar =
+      p->mass * p->u * hydro_gamma_minus_one * kernel_root * h_inv_dim;
+  p->density.wcount = kernel_root * h_inv_dim;
+  p->density.rho_dh = 0.f;
+  p->density.wcount_dh = 0.f;
+  p->density.pressure_bar_dh = 0.f;
+
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
+
+  /* Probably not shocking, so this is safe to do */
+  p->viscosity.div_v = 0.f;
+  p->diffusion.laplace_u = 0.f;
+}
+
+/**
+ * @brief Prepare a particle for the force calculation.
+ *
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
+ */
+__attribute__((always_inline)) INLINE static void hydro_prepare_force(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
+
+  /* Here we need to update the artificial viscosity */
+
+  /* Timescale for decay */
+  const float tau =
+      p->h / (2.f * p->viscosity.v_sig * hydro_props->viscosity.length);
+  /* Construct time differential of div.v implicitly */
+  const float div_v_dt =
+      dt_alpha == 0.f
+          ? 0.f
+          : (p->viscosity.div_v - p->viscosity.div_v_previous_step) / dt_alpha;
+  /* Construct the source term for the AV; if shock detected this is _positive_
+   * as div_v_dt should be _negative_ before the shock hits */
+  const float S = p->h * p->h * max(0.f, -1.f * div_v_dt);
+  const float v_sig_square = p->viscosity.v_sig * p->viscosity.v_sig;
+  /* Calculate the current appropriate value of the AV based on the above */
+  const float alpha_loc =
+      hydro_props->viscosity.alpha_max * S / (v_sig_square + S);
+
+  if (alpha_loc > p->viscosity.alpha) {
+    /* Reset the value of alpha to the appropriate value */
+    p->viscosity.alpha = alpha_loc;
+  } else {
+    /* Integrate the alpha forward in time to decay back to alpha = 0 */
+    const float alpha_dt = (alpha_loc - p->viscosity.alpha) / tau;
+
+    /* Finally, we can update the actual value of the alpha */
+    p->viscosity.alpha += alpha_dt * dt_alpha;
+  }
+
+  if (p->viscosity.alpha < hydro_props->viscosity.alpha_min) {
+    p->viscosity.alpha = hydro_props->viscosity.alpha_min;
+  }
+
+  /* Set our old div_v to the one for the next loop */
+  p->viscosity.div_v_previous_step = p->viscosity.div_v;
+
+  /* Now for the diffusive alpha */
+
+  const float sqrt_u = sqrtf(p->u);
+  /* Calculate initial value of alpha dt before bounding */
+  /* alpha_diff_dt is cosmology-less */
+  float alpha_diff_dt =
+      hydro_props->diffusion.beta * p->h * p->diffusion.laplace_u / sqrt_u;
+
+  float new_diffusion_alpha = p->diffusion.alpha + alpha_diff_dt * dt_alpha;
+
+  if (new_diffusion_alpha > hydro_props->diffusion.alpha_max) {
+    new_diffusion_alpha = hydro_props->diffusion.alpha_max;
+  } else if (new_diffusion_alpha < hydro_props->diffusion.alpha_min) {
+    new_diffusion_alpha = hydro_props->diffusion.alpha_min;
+  }
+
+  p->diffusion.alpha = new_diffusion_alpha;
+}
+
+/**
+ * @brief Reset acceleration fields of a particle
+ *
+ * Resets all hydro acceleration and time derivative fields in preparation
+ * for the sums taking  place in the various force tasks.
+ *
+ * @param p The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void hydro_reset_acceleration(
+    struct part *restrict p) {
+
+  /* Reset the acceleration. */
+  p->a_hydro[0] = 0.0f;
+  p->a_hydro[1] = 0.0f;
+  p->a_hydro[2] = 0.0f;
+
+  /* Reset the time derivatives. */
+  p->u_dt = 0.0f;
+  p->force.h_dt = 0.0f;
+}
+
+/**
+ * @brief Sets the values to be predicted in the drifts to their values at a
+ * kick time
+ *
+ * @param p The particle.
+ * @param xp The extended data of this particle.
+ */
+__attribute__((always_inline)) INLINE static void hydro_reset_predicted_values(
+    struct part *restrict p, const struct xpart *restrict xp) {
+
+  /* Re-set the predicted velocities */
+  p->v[0] = xp->v_full[0];
+  p->v[1] = xp->v_full[1];
+  p->v[2] = xp->v_full[2];
+
+  /* Re-set the entropy */
+  p->u = xp->u_full;
+}
+
+/**
+ * @brief Predict additional particle fields forward in time when drifting
+ *
+ * Additional hydrodynamic quantities are drifted forward in time here. These
+ * include thermal quantities (thermal energy or total energy or entropy, ...).
+ *
+ * Note the different time-step sizes used for the different quantities as they
+ * include cosmological factors.
+ *
+ * @param p The particle.
+ * @param xp The extended data of the particle.
+ * @param dt_drift The drift time-step for positions.
+ * @param dt_therm The drift time-step for thermal quantities.
+ */
+__attribute__((always_inline)) INLINE static void hydro_predict_extra(
+    struct part *restrict p, const struct xpart *restrict xp, float dt_drift,
+    float dt_therm) {
+
+  const float h_inv = 1.f / p->h;
+
+  /* Predict smoothing length */
+  const float w1 = p->force.h_dt * h_inv * dt_drift;
+  if (fabsf(w1) < 0.2f)
+    p->h *= approx_expf(w1); /* 4th order expansion of exp(w) */
+  else
+    p->h *= expf(w1);
+
+  /* Predict density and weighted pressure */
+  const float w2 = -hydro_dimension * w1;
+  if (fabsf(w2) < 0.2f) {
+    const float expf_approx =
+        approx_expf(w2); /* 4th order expansion of exp(w) */
+    p->rho *= expf_approx;
+    p->pressure_bar *= expf_approx;
+  } else {
+    const float expf_exact = expf(w2);
+    p->rho *= expf_exact;
+    p->pressure_bar *= expf_exact;
+  }
+
+  /* Predict the internal energy */
+  p->u += p->u_dt * dt_therm;
+
+  /* Compute the new sound speed */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  p->force.soundspeed = soundspeed;
+}
+
+/**
+ * @brief Finishes the force calculation.
+ *
+ * Multiplies the force and accelerations by the appropriate constants
+ * and add the self-contribution term. In most cases, there is little
+ * to do here.
+ *
+ * Cosmological terms are also added/multiplied here.
+ *
+ * @param p The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_end_force(
+    struct part *restrict p, const struct cosmology *cosmo) {
+
+  p->force.h_dt *= p->h * hydro_dimension_inv;
+}
+
+/**
+ * @brief Kick the additional variables
+ *
+ * Additional hydrodynamic quantities are kicked forward in time here. These
+ * include thermal quantities (thermal energy or total energy or entropy, ...).
+ *
+ * @param p The particle to act upon.
+ * @param xp The particle extended data to act upon.
+ * @param dt_therm The time-step for this kick (for thermodynamic quantities).
+ * @param dt_grav The time-step for this kick (for gravity quantities).
+ * @param dt_hydro The time-step for this kick (for hydro quantities).
+ * @param dt_kick_corr The time-step for this kick (for gravity corrections).
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme
+ */
+__attribute__((always_inline)) INLINE static void hydro_kick_extra(
+    struct part *restrict p, struct xpart *restrict xp, float dt_therm,
+    float dt_grav, float dt_hydro, float dt_kick_corr,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Do not decrease the energy by more than a factor of 2*/
+  if (dt_therm > 0. && p->u_dt * dt_therm < -0.5f * xp->u_full) {
+    p->u_dt = -0.5f * xp->u_full / dt_therm;
+  }
+  xp->u_full += p->u_dt * dt_therm;
+
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u_dt = 0.f;
+  }
+
+  /* Compute the sound speed */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  p->force.soundspeed = soundspeed;
+}
+
+/**
+ * @brief Converts hydro quantity of a particle at the start of a run
+ *
+ * This function is called once at the end of the engine_init_particle()
+ * routine (at the start of a calculation) after the densities of
+ * particles have been computed.
+ * This can be used to convert internal energy into entropy for instance.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle to act upon
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme.
+ */
+__attribute__((always_inline)) INLINE static void hydro_convert_quantities(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Convert the physical internal energy to the comoving one. */
+  /* u' = a^(3(g-1)) u */
+  const float factor = 1.f / cosmo->a_factor_internal_energy;
+  p->u *= factor;
+  xp->u_full = p->u;
+
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u = min_energy;
+    p->u_dt = 0.f;
+  }
+
+  /* Note that unlike Minimal the pressure and sound speed cannot be calculated
+   * here because they are smoothed properties in this scheme. */
+
+  /* Set the initial value of the artificial viscosity based on the non-variable
+     schemes for safety */
+
+  p->viscosity.alpha = hydro_props->viscosity.alpha;
+  /* Initialise this here to keep all the AV variables together */
+  p->viscosity.div_v_previous_step = 0.f;
+
+  /* Set the initial values for the thermal diffusion */
+  p->diffusion.alpha = hydro_props->diffusion.alpha;
+}
+
+/**
+ * @brief Initialises the particles for the first time
+ *
+ * This function is called only once just after the ICs have been
+ * read in to do some conversions or assignments between the particle
+ * and extended particle fields.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ */
+__attribute__((always_inline)) INLINE static void hydro_first_init_part(
+    struct part *restrict p, struct xpart *restrict xp) {
+
+  p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
+  xp->v_full[0] = p->v[0];
+  xp->v_full[1] = p->v[1];
+  xp->v_full[2] = p->v[2];
+  xp->a_grav[0] = 0.f;
+  xp->a_grav[1] = 0.f;
+  xp->a_grav[2] = 0.f;
+  xp->u_full = p->u;
+
+  hydro_reset_acceleration(p);
+  hydro_init_part(p, NULL);
+}
+
+/**
+ * @brief Overwrite the initial internal energy of a particle.
+ *
+ * Note that in the cases where the thermodynamic variable is not
+ * internal energy but gets converted later, we must overwrite that
+ * field. The conversion to the actual variable happens later after
+ * the initial fake time-step.
+ *
+ * @param p The #part to write to.
+ * @param u_init The new initial internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_init_internal_energy(struct part *p, float u_init) {
+
+  p->u = u_init;
+}
+
+#endif /* SWIFT_ANARCHY_PU_HYDRO_H */
diff --git a/src/hydro/AnarchyPU/hydro_debug.h b/src/hydro/AnarchyPU/hydro_debug.h
new file mode 100644
index 0000000000000000000000000000000000000000..79ab5b96653a3f503c1baf255f4296f0ccc4aca9
--- /dev/null
+++ b/src/hydro/AnarchyPU/hydro_debug.h
@@ -0,0 +1,44 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ANARCHY_PU_HYDRO_DEBUG_H
+#define SWIFT_ANARCHY_PU_HYDRO_DEBUG_H
+/**
+ * @file AnarchyPU/hydro_debug.h
+ * @brief P-U conservative implementation of SPH (Debugging routines)
+ */
+
+__attribute__((always_inline)) INLINE static void hydro_debug_particle(
+    const struct part* p, const struct xpart* xp) {
+  printf(
+      "x=[%.3e,%.3e,%.3e], "
+      "v=[%.3e,%.3e,%.3e],v_full=[%.3e,%.3e,%.3e] \n a=[%.3e,%.3e,%.3e], "
+      "u=%.3e, du/dt=%.3e v_sig=%.3e, P=%.3e\n"
+      "h=%.3e, dh/dt=%.3e wcount=%d, m=%.3e, dh_drho=%.3e, rho=%.3e, \n"
+      "p_dh=%.3e, p_bar=%.3e, alpha=%.3e \n"
+      "time_bin=%d\n",
+      p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
+      xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
+      p->u, p->u_dt, p->viscosity.v_sig, hydro_get_comoving_pressure(p), p->h,
+      p->force.h_dt, (int)p->density.wcount, p->mass, p->density.rho_dh, p->rho,
+      p->density.pressure_bar_dh, p->pressure_bar, p->viscosity.alpha,
+      p->time_bin);
+}
+
+#endif /* SWIFT_ANARCHY_PU_HYDRO_DEBUG_H */
diff --git a/src/hydro/AnarchyPU/hydro_iact.h b/src/hydro/AnarchyPU/hydro_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..c214db3b018e00b7f3881fb301b55d6cf49a1f43
--- /dev/null
+++ b/src/hydro/AnarchyPU/hydro_iact.h
@@ -0,0 +1,577 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ANARCHY_PU_HYDRO_IACT_H
+#define SWIFT_ANARCHY_PU_HYDRO_IACT_H
+
+/**
+ * @file AnarchyPU/hydro_iact.h
+ * @brief P-U implementation of SPH (Neighbour loop equations)
+ *
+ * The thermal variable is the internal energy (u). A simple constant
+ * viscosity term with a Balsara switch is implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "adiabatic_index.h"
+#include "minmax.h"
+
+/**
+ * @brief Density interaction between two particles.
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_density(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    struct part* pj, float a, float H) {
+
+  float wi, wj, wi_dx, wj_dx;
+  float dv[3], curlvr[3];
+
+  const float r = sqrtf(r2);
+
+  /* Get the masses. */
+  const float mi = pi->mass;
+  const float mj = pj->mass;
+
+  /* Compute density of pi. */
+  const float hi_inv = 1.f / hi;
+  const float ui = r * hi_inv;
+
+  kernel_deval(ui, &wi, &wi_dx);
+
+  pi->rho += mj * wi;
+  pi->density.rho_dh -= mj * (hydro_dimension * wi + ui * wi_dx);
+
+  pi->pressure_bar += mj * wi * pj->u;
+  pi->density.pressure_bar_dh -=
+      mj * pj->u * (hydro_dimension * wi + ui * wi_dx);
+  pi->density.wcount += wi;
+  pi->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  /* Compute density of pj. */
+  const float hj_inv = 1.f / hj;
+  const float uj = r * hj_inv;
+  kernel_deval(uj, &wj, &wj_dx);
+
+  pj->rho += mi * wj;
+  pj->density.rho_dh -= mi * (hydro_dimension * wj + uj * wj_dx);
+  pj->pressure_bar += mi * wj * pi->u;
+  pj->density.pressure_bar_dh -=
+      mi * pi->u * (hydro_dimension * wj + uj * wj_dx);
+  pj->density.wcount += wj;
+  pj->density.wcount_dh -= (hydro_dimension * wj + uj * wj_dx);
+
+  /* Now we need to compute the div terms */
+  const float r_inv = 1.f / r;
+  const float faci = mj * wi_dx * r_inv;
+  const float facj = mi * wj_dx * r_inv;
+
+  /* Compute dv dot r */
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  pi->viscosity.div_v -= faci * dvdr;
+  pj->viscosity.div_v -= facj * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
+
+  /* Negative because of the change in sign of dx & dv. */
+  pj->density.rot_v[0] += facj * curlvr[0];
+  pj->density.rot_v[1] += facj * curlvr[1];
+  pj->density.rot_v[2] += facj * curlvr[2];
+}
+
+/**
+ * @brief Density interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_density(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    const struct part* pj, float a, float H) {
+
+  float wi, wi_dx;
+  float dv[3], curlvr[3];
+
+  /* Get the masses. */
+  const float mj = pj->mass;
+
+  /* Get r and r inverse. */
+  const float r = sqrtf(r2);
+
+  const float h_inv = 1.f / hi;
+  const float ui = r * h_inv;
+  kernel_deval(ui, &wi, &wi_dx);
+
+  pi->rho += mj * wi;
+  pi->density.rho_dh -= mj * (hydro_dimension * wi + ui * wi_dx);
+
+  pi->pressure_bar += mj * wi * pj->u;
+
+  pi->density.pressure_bar_dh -=
+      mj * pj->u * (hydro_dimension * wi + ui * wi_dx);
+  pi->density.wcount += wi;
+  pi->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  const float r_inv = 1.f / r;
+  const float faci = mj * wi_dx * r_inv;
+
+  /* Compute dv dot r */
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  pi->viscosity.div_v -= faci * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
+}
+
+/**
+ * @brief Calculate the gradient interaction between particle i and particle j
+ *
+ * This method wraps around hydro_gradients_collect, which can be an empty
+ * method, in which case no gradients are used.
+ *
+ * @param r2 Comoving squared distance between particle i and particle j.
+ * @param dx Comoving distance vector between the particles (dx = pi->x -
+ * pj->x).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi Particle i.
+ * @param pj Particle j.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_gradient(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* We need to construct the maximal signal velocity between our particle
+   * and all of it's neighbours */
+
+  const float dv_dx = (pi->v[0] - pj->v[0]) * dx[0] +
+                      (pi->v[1] - pj->v[1]) * dx[1] +
+                      (pi->v[2] - pj->v[2]) * dx[2];
+
+  const float dv_dx_factor = min(0, const_viscosity_beta * dv_dx);
+
+  const float new_v_sig =
+      pi->force.soundspeed + pj->force.soundspeed - dv_dx_factor;
+
+  /* Update if we need to */
+  pi->viscosity.v_sig = max(pi->viscosity.v_sig, new_v_sig);
+  pj->viscosity.v_sig = max(pj->viscosity.v_sig, new_v_sig);
+
+  /* Calculate Del^2 u for the thermal diffusion coefficient. */
+  /* Need to get some kernel values F_ij = wi_dx */
+  float wi, wi_dx, wj, wj_dx;
+
+  const float r = sqrtf(r2);
+  const float ui = r / hi;
+  const float uj = r / hj;
+
+  kernel_deval(ui, &wi, &wi_dx);
+  kernel_deval(uj, &wj, &wj_dx);
+
+  const float delta_u_factor = (pi->u - pj->u) / r;
+  pi->diffusion.laplace_u += pj->mass * delta_u_factor * wi_dx / pj->rho;
+  pj->diffusion.laplace_u -= pi->mass * delta_u_factor * wj_dx / pi->rho;
+}
+
+/**
+ * @brief Calculate the gradient interaction between particle i and particle j:
+ * non-symmetric version
+ *
+ * This method wraps around hydro_gradients_nonsym_collect, which can be an
+ * empty method, in which case no gradients are used.
+ *
+ * @param r2 Comoving squared distance between particle i and particle j.
+ * @param dx Comoving distance vector between the particles (dx = pi->x -
+ * pj->x).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi Particle i.
+ * @param pj Particle j.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_gradient(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* We need to construct the maximal signal velocity between our particle
+   * and all of it's neighbours */
+
+  const float dv_dx = (pi->v[0] - pj->v[0]) * dx[0] +
+                      (pi->v[1] - pj->v[1]) * dx[1] +
+                      (pi->v[2] - pj->v[2]) * dx[2];
+
+  const float dv_dx_factor = min(0, const_viscosity_beta * dv_dx);
+
+  const float new_v_sig =
+      pi->force.soundspeed + pj->force.soundspeed - dv_dx_factor;
+
+  /* Update if we need to */
+  pi->viscosity.v_sig = max(pi->viscosity.v_sig, new_v_sig);
+
+  /* Calculate Del^2 u for the thermal diffusion coefficient. */
+  /* Need to get some kernel values F_ij = wi_dx */
+  float wi, wi_dx;
+
+  const float r = sqrtf(r2);
+  const float ui = r / hi;
+
+  kernel_deval(ui, &wi, &wi_dx);
+
+  const float delta_u_factor = (pi->u - pj->u) / r;
+  pi->diffusion.laplace_u += pj->mass * delta_u_factor * wi_dx / pj->rho;
+}
+
+/**
+ * @brief Force interaction between two particles.
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_force(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    struct part* pj, float a, float H) {
+
+  /* Cosmological factors entering the EoMs */
+  const float fac_mu = pow_three_gamma_minus_five_over_two(a);
+  const float a2_Hubble = a * a * H;
+
+  const float r = sqrtf(r2);
+  const float r_inv = 1.0f / r;
+
+  /* Recover some data */
+  const float mj = pj->mass;
+  const float mi = pi->mass;
+
+  const float miui = mi * pi->u;
+  const float mjuj = mj * pj->u;
+
+  const float rhoi = pi->rho;
+  const float rhoj = pj->rho;
+  /* Compute gradient terms */
+  const float f_ij = 1.f - (pi->force.f / mjuj);
+  const float f_ji = 1.f - (pj->force.f / miui);
+
+  /* Get the kernel for hi. */
+  const float hi_inv = 1.0f / hi;
+  const float hid_inv = pow_dimension_plus_one(hi_inv); /* 1/h^(d+1) */
+  const float xi = r * hi_inv;
+  float wi, wi_dx;
+  kernel_deval(xi, &wi, &wi_dx);
+  const float wi_dr = hid_inv * wi_dx;
+
+  /* Get the kernel for hj. */
+  const float hj_inv = 1.0f / hj;
+  const float hjd_inv = pow_dimension_plus_one(hj_inv); /* 1/h^(d+1) */
+  const float xj = r * hj_inv;
+  float wj, wj_dx;
+  kernel_deval(xj, &wj, &wj_dx);
+  const float wj_dr = hjd_inv * wj_dx;
+
+  /* Compute dv dot r. */
+  const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
+                     (pi->v[1] - pj->v[1]) * dx[1] +
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; not used for du/dt */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
+
+  /* Are the particles moving towards each other? */
+  const float omega_ij = min(dvdr_Hubble, 0.f);
+  const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
+
+  /* Compute sound speeds and signal velocity */
+  const float v_sig = 0.5 * (pi->viscosity.v_sig + pj->viscosity.v_sig);
+
+  /* Balsara term */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
+
+  /* Construct the full viscosity term */
+  const float rho_ij = rhoi + rhoj;
+  const float alpha = pi->viscosity.alpha + pj->viscosity.alpha;
+  const float visc =
+      -0.25f * alpha * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
+
+  /* Convolve with the kernel */
+  const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
+
+  /* SPH acceleration term */
+  const float sph_acc_term =
+      pj->u * pi->u * hydro_gamma_minus_one * hydro_gamma_minus_one *
+      ((f_ij / pi->pressure_bar) * wi_dr + (f_ji / pj->pressure_bar) * wj_dr) *
+      r_inv;
+
+  /* Assemble the acceleration */
+  const float acc = sph_acc_term + visc_acc_term;
+
+  /* Use the force Luke ! */
+  pi->a_hydro[0] -= mj * acc * dx[0];
+  pi->a_hydro[1] -= mj * acc * dx[1];
+  pi->a_hydro[2] -= mj * acc * dx[2];
+
+  pj->a_hydro[0] += mi * acc * dx[0];
+  pj->a_hydro[1] += mi * acc * dx[1];
+  pj->a_hydro[2] += mi * acc * dx[2];
+
+  /* Get the time derivative for u. */
+  const float sph_du_term_i = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pj->u * pi->u * (f_ij / pi->pressure_bar) *
+                              wi_dr * dvdr * r_inv;
+  const float sph_du_term_j = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pi->u * pj->u * (f_ji / pj->pressure_bar) *
+                              wj_dr * dvdr * r_inv;
+
+  /* Viscosity term */
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
+
+  /* Diffusion term */
+  const float v_diff =
+      max(pi->force.soundspeed + pj->force.soundspeed + dvdr_Hubble, 0.f);
+  const float alpha_diff = 0.5 * (pi->diffusion.alpha + pj->diffusion.alpha);
+  /* wi_dx + wj_dx / 2 is F_ij */
+  const float diff_du_term =
+      alpha_diff * fac_mu * v_diff * (pi->u - pj->u) * (wi_dr + wj_dr) / rho_ij;
+
+  /* Assemble the energy equation term */
+  const float du_dt_i = sph_du_term_i + visc_du_term + diff_du_term;
+  const float du_dt_j = sph_du_term_j + visc_du_term - diff_du_term;
+
+  /* Internal energy time derivative */
+  pi->u_dt += du_dt_i * mj;
+  pj->u_dt += du_dt_j * mi;
+
+  /* Get the time derivative for h. */
+  pi->force.h_dt -= mj * dvdr * r_inv / rhoj * wi_dr;
+  pj->force.h_dt -= mi * dvdr * r_inv / rhoi * wj_dr;
+}
+
+/**
+ * @brief Force interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    const struct part* pj, float a, float H) {
+
+  /* Cosmological factors entering the EoMs */
+  const float fac_mu = pow_three_gamma_minus_five_over_two(a);
+  const float a2_Hubble = a * a * H;
+
+  const float r = sqrtf(r2);
+  const float r_inv = 1.0f / r;
+
+  /* Recover some data */
+  // const float mi = pi->mass;
+  const float mj = pj->mass;
+  const float mi = pi->mass;
+
+  const float miui = mi * pi->u;
+  const float mjuj = mj * pj->u;
+
+  const float rhoi = pi->rho;
+  const float rhoj = pj->rho;
+  /* Compute gradient terms */
+  const float f_ij = 1.f - (pi->force.f / mjuj);
+  const float f_ji = 1.f - (pj->force.f / miui);
+
+  /* Get the kernel for hi. */
+  const float hi_inv = 1.0f / hi;
+  const float hid_inv = pow_dimension_plus_one(hi_inv); /* 1/h^(d+1) */
+  const float xi = r * hi_inv;
+  float wi, wi_dx;
+  kernel_deval(xi, &wi, &wi_dx);
+  const float wi_dr = hid_inv * wi_dx;
+
+  /* Get the kernel for hj. */
+  const float hj_inv = 1.0f / hj;
+  const float hjd_inv = pow_dimension_plus_one(hj_inv); /* 1/h^(d+1) */
+  const float xj = r * hj_inv;
+  float wj, wj_dx;
+  kernel_deval(xj, &wj, &wj_dx);
+  const float wj_dr = hjd_inv * wj_dx;
+
+  /* Compute dv dot r. */
+  const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
+                     (pi->v[1] - pj->v[1]) * dx[1] +
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; not used for du/dt */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
+
+  /* Are the particles moving towards each other? */
+  const float omega_ij = min(dvdr_Hubble, 0.f);
+  const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
+
+  /* Compute sound speeds and signal velocity */
+  const float v_sig = 0.5 * (pi->viscosity.v_sig + pj->viscosity.v_sig);
+
+  /* Balsara term */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
+
+  /* Construct the full viscosity term */
+  const float rho_ij = rhoi + rhoj;
+  const float alpha = pi->viscosity.alpha + pj->viscosity.alpha;
+  const float visc =
+      -0.25f * alpha * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
+
+  /* Convolve with the kernel */
+  const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
+
+  /* SPH acceleration term */
+  const float sph_acc_term =
+      pj->u * pi->u * hydro_gamma_minus_one * hydro_gamma_minus_one *
+      ((f_ij / pi->pressure_bar) * wi_dr + (f_ji / pj->pressure_bar) * wj_dr) *
+      r_inv;
+
+  /* Assemble the acceleration */
+  const float acc = sph_acc_term + visc_acc_term;
+
+  /* Use the force Luke ! */
+  pi->a_hydro[0] -= mj * acc * dx[0];
+  pi->a_hydro[1] -= mj * acc * dx[1];
+  pi->a_hydro[2] -= mj * acc * dx[2];
+
+  /* Get the time derivative for u. */
+  const float sph_du_term_i = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pj->u * pi->u * (f_ij / pi->pressure_bar) *
+                              wi_dr * dvdr * r_inv;
+
+  /* Viscosity term */
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
+
+  /* Diffusion term */
+  const float v_diff =
+      max(pi->force.soundspeed + pj->force.soundspeed + dvdr_Hubble, 0.f);
+  const float alpha_diff = 0.5 * (pi->diffusion.alpha + pj->diffusion.alpha);
+  /* wi_dx + wj_dx / 2 is F_ij */
+  const float diff_du_term =
+      alpha_diff * fac_mu * v_diff * (pi->u - pj->u) * (wi_dr + wj_dr) / rho_ij;
+
+  /* Assemble the energy equation term */
+  const float du_dt_i = sph_du_term_i + visc_du_term + diff_du_term;
+
+  /* Internal energy time derivative */
+  pi->u_dt += du_dt_i * mj;
+
+  /* Get the time derivative for h. */
+  pi->force.h_dt -= mj * dvdr * r_inv / rhoj * wi_dr;
+}
+
+/**
+ * @brief Timestep limiter loop
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ *
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ *
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->viscosity.v_sig >
+      const_limiter_max_v_sig_ratio * pj->viscosity.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
+#endif /* SWIFT_ANARCHY_PU_HYDRO_IACT_H */
diff --git a/src/hydro/AnarchyPU/hydro_io.h b/src/hydro/AnarchyPU/hydro_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..b2a411cf408a2067ea5490869be004f9fd26954d
--- /dev/null
+++ b/src/hydro/AnarchyPU/hydro_io.h
@@ -0,0 +1,224 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ANARCHY_PU_HYDRO_IO_H
+#define SWIFT_ANARCHY_PU_HYDRO_IO_H
+/**
+ * @file AnarchyPU/hydro_io.h
+ * @brief P-U implementation of SPH (i/o routines)
+ *
+ * The thermal variable is the internal energy (u). A simple constant
+ * viscosity term with a Balsara switch is implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "adiabatic_index.h"
+#include "hydro.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
+/**
+ * @brief Specifies which particle fields to read from a dataset
+ *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
+ */
+INLINE static void hydro_read_particles(struct part* parts,
+                                        struct io_props* list,
+                                        int* num_fields) {
+
+  *num_fields = 8;
+
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, parts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                parts, mass);
+  list[3] = io_make_input_field("SmoothingLength", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, h);
+  list[4] = io_make_input_field("InternalEnergy", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, parts, id);
+  list[6] = io_make_input_field("Accelerations", FLOAT, 3, OPTIONAL,
+                                UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_DENSITY, parts, rho);
+}
+
+INLINE static void convert_S(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  ret[0] = hydro_get_comoving_entropy(p, xp);
+}
+
+INLINE static void convert_P(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  ret[0] = hydro_get_comoving_pressure(p);
+}
+
+INLINE static void convert_part_pos(const struct engine* e,
+                                    const struct part* p,
+                                    const struct xpart* xp, double* ret) {
+
+  if (e->s->periodic) {
+    ret[0] = box_wrap(p->x[0], 0.0, e->s->dim[0]);
+    ret[1] = box_wrap(p->x[1], 0.0, e->s->dim[1]);
+    ret[2] = box_wrap(p->x[2], 0.0, e->s->dim[2]);
+  } else {
+    ret[0] = p->x[0];
+    ret[1] = p->x[1];
+    ret[2] = p->x[2];
+  }
+}
+
+INLINE static void convert_part_vel(const struct engine* e,
+                                    const struct part* p,
+                                    const struct xpart* xp, float* ret) {
+
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const struct cosmology* cosmo = e->cosmology;
+  const integertime_t ti_current = e->ti_current;
+  const double time_base = e->time_base;
+
+  const integertime_t ti_beg = get_integer_time_begin(ti_current, p->time_bin);
+  const integertime_t ti_end = get_integer_time_end(ti_current, p->time_bin);
+
+  /* Get time-step since the last kick */
+  float dt_kick_grav, dt_kick_hydro;
+  if (with_cosmology) {
+    dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current);
+    dt_kick_grav -=
+        cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2);
+    dt_kick_hydro = cosmology_get_hydro_kick_factor(cosmo, ti_beg, ti_current);
+    dt_kick_hydro -=
+        cosmology_get_hydro_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2);
+  } else {
+    dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base;
+    dt_kick_hydro = (ti_current - ((ti_beg + ti_end) / 2)) * time_base;
+  }
+
+  /* Extrapolate the velocities to the current time */
+  hydro_get_drifted_velocities(p, xp, dt_kick_hydro, dt_kick_grav, ret);
+
+  /* Conversion from internal units to peculiar velocities */
+  ret[0] *= cosmo->a_inv;
+  ret[1] *= cosmo->a_inv;
+  ret[2] *= cosmo->a_inv;
+}
+
+INLINE static void convert_part_potential(const struct engine* e,
+                                          const struct part* p,
+                                          const struct xpart* xp, float* ret) {
+  if (p->gpart != NULL)
+    ret[0] = gravity_get_comoving_potential(p->gpart);
+  else
+    ret[0] = 0.f;
+}
+
+INLINE static void convert_viscosity(const struct engine* e,
+                                     const struct part* p,
+                                     const struct xpart* xp, float* ret) {
+  ret[0] = p->viscosity.alpha;
+}
+
+INLINE static void convert_diffusion(const struct engine* e,
+                                     const struct part* p,
+                                     const struct xpart* xp, float* ret) {
+  ret[0] = p->diffusion.alpha;
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
+ */
+INLINE static void hydro_write_particles(const struct part* parts,
+                                         const struct xpart* xparts,
+                                         struct io_props* list,
+                                         int* num_fields) {
+
+  *num_fields = 12;
+
+  /* List what we want to write */
+  list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3,
+                                              UNIT_CONV_LENGTH, parts, xparts,
+                                              convert_part_pos);
+  list[1] = io_make_output_field_convert_part(
+      "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
+  list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 parts, h);
+  list[4] = io_make_output_field("InternalEnergy", FLOAT, 1,
+                                 UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+  list[6] =
+      io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
+  list[7] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE,
+                                 parts, pressure_bar);
+  list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1,
+                                              UNIT_CONV_ENTROPY_PER_UNIT_MASS,
+                                              parts, xparts, convert_S);
+  list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1,
+                                              UNIT_CONV_POTENTIAL, parts,
+                                              xparts, convert_part_potential);
+  list[10] = io_make_output_field_convert_part("Viscosity", FLOAT, 1,
+                                               UNIT_CONV_NO_UNITS, parts,
+                                               xparts, convert_viscosity);
+  list[11] = io_make_output_field_convert_part("Diffusion", FLOAT, 1,
+                                               UNIT_CONV_NO_UNITS, parts,
+                                               xparts, convert_diffusion);
+}
+
+/**
+ * @brief Writes the current model of SPH to the file
+ * @param h_grpsph The HDF5 group in which to write
+ */
+INLINE static void hydro_write_flavour(hid_t h_grpsph) {
+
+  /* Viscosity and thermal conduction */
+  /* Nothing in this minimal model... */
+  io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment");
+  io_write_attribute_s(h_grpsph, "Viscosity Model",
+                       "Simplified version of Cullen & Dehnen (2011)");
+
+  /* Time integration properties */
+  io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt",
+                       const_max_u_change);
+}
+
+/**
+ * @brief Are we writing entropy in the internal energy field ?
+ *
+ * @return 1 if entropy is in 'internal energy', 0 otherwise.
+ */
+INLINE static int writeEntropyFlag(void) { return 0; }
+
+#endif /* SWIFT_ANARCHY_PU_HYDRO_IO_H */
diff --git a/src/hydro/AnarchyPU/hydro_part.h b/src/hydro/AnarchyPU/hydro_part.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c5022c262587c50f577970e1d1891a42b70491b
--- /dev/null
+++ b/src/hydro/AnarchyPU/hydro_part.h
@@ -0,0 +1,218 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_ANARCHY_PU_HYDRO_PART_H
+#define SWIFT_ANARCHY_PU_HYDRO_PART_H
+/**
+ * @file AnarchyPU/hydro_part.h
+ * @brief P-U implementation of SPH (Particle definition)
+ *
+ * The thermal variable is the internal energy (u). A simple constant
+ * viscosity term with a Balsara switch is implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "chemistry_struct.h"
+#include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Particle fields not needed during the SPH loops over neighbours.
+ *
+ * This structure contains the particle fields that are not used in the
+ * density or force loops. Quantities should be used in the kick, drift and
+ * potentially ghost tasks only.
+ */
+struct xpart {
+
+  /*! Offset between current position and position at last tree rebuild. */
+  float x_diff[3];
+
+  /*! Offset between the current position and position at the last sort. */
+  float x_diff_sort[3];
+
+  /*! Velocity at the last full step. */
+  float v_full[3];
+
+  /*! Gravitational acceleration at the last full step. */
+  float a_grav[3];
+
+  /*! Internal energy at the last full step. */
+  float u_full;
+
+  /*! Additional data used to record cooling information */
+  struct cooling_xpart_data cooling_data;
+
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /* Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
+} SWIFT_STRUCT_ALIGN;
+
+/**
+ * @brief Particle fields for the SPH particles
+ *
+ * The density and force substructures are used to contain variables only used
+ * within the density and force loops over neighbours. All more permanent
+ * variables should be declared in the main part of the part structure,
+ */
+struct part {
+
+  /*! Particle unique ID. */
+  long long id;
+
+  /*! Pointer to corresponding gravity part. */
+  struct gpart* gpart;
+
+  /*! Particle position. */
+  double x[3];
+
+  /*! Particle predicted velocity. */
+  float v[3];
+
+  /*! Particle acceleration. */
+  float a_hydro[3];
+
+  /*! Particle mass. */
+  float mass;
+
+  /*! Particle smoothing length. */
+  float h;
+
+  /*! Particle internal energy. */
+  float u;
+
+  /*! Time derivative of the internal energy. */
+  float u_dt;
+
+  /*! Particle density. */
+  float rho;
+
+  /*! Particle pressure (weighted) */
+  float pressure_bar;
+
+  /* Store viscosity information in a separate struct. */
+  struct {
+
+    /*! Particle velocity divergence */
+    float div_v;
+
+    /*! Particle velocity divergence from previous step */
+    float div_v_previous_step;
+
+    /*! Artificial viscosity parameter */
+    float alpha;
+
+    /*! Signal velocity */
+    float v_sig;
+
+  } viscosity;
+
+  /* Store thermal diffusion information in a separate struct. */
+  struct {
+
+    /*! del^2 u, a smoothed quantity */
+    float laplace_u;
+
+    /*! Thermal diffusion coefficient */
+    float alpha;
+
+  } diffusion;
+
+  /* Store density/force specific stuff. */
+  union {
+
+    /**
+     * @brief Structure for the variables only used in the density loop over
+     * neighbours.
+     *
+     * Quantities in this sub-structure should only be accessed in the density
+     * loop over neighbours and the ghost task.
+     */
+    struct {
+
+      /*! Neighbour number count. */
+      float wcount;
+
+      /*! Derivative of the neighbour number with respect to h. */
+      float wcount_dh;
+
+      /*! Derivative of density with respect to h */
+      float rho_dh;
+
+      /*! Derivative of the weighted pressure with respect to h */
+      float pressure_bar_dh;
+
+      /*! Particle velocity curl. */
+      float rot_v[3];
+
+    } density;
+
+    /**
+     * @brief Structure for the variables only used in the force loop over
+     * neighbours.
+     *
+     * Quantities in this sub-structure should only be accessed in the force
+     * loop over neighbours and the ghost, drift and kick tasks.
+     */
+    struct {
+
+      /*! "Grad h" term -- only partial in P-U */
+      float f;
+
+      /*! Particle soundspeed. */
+      float soundspeed;
+
+      /*! Time derivative of smoothing length  */
+      float h_dt;
+
+      /*! Balsara switch */
+      float balsara;
+
+    } force;
+  };
+
+  /* Chemistry information */
+  struct chemistry_part_data chemistry_data;
+
+  /*! Time-step length */
+  timebin_t time_bin;
+
+  /* Need waking up ? */
+  timebin_t wakeup;
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Time of the last drift */
+  integertime_t ti_drift;
+
+  /* Time of the last kick */
+  integertime_t ti_kick;
+
+#endif
+
+} SWIFT_STRUCT_ALIGN;
+
+#endif /* SWIFT_ANARCHY_PU_HYDRO_PART_H */
diff --git a/src/hydro/Default/hydro.h b/src/hydro/Default/hydro.h
index 237a34283e1b89b06a4de1d6f1890f9a7e1af509..2b1d19bc916889a5cfdc40b1357f1e3dfe9388af 100644
--- a/src/hydro/Default/hydro.h
+++ b/src/hydro/Default/hydro.h
@@ -32,27 +32,59 @@
 #include <float.h>
 
 /**
- * @brief Returns the comoving internal energy of a particle
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return p->u;
+  return xp->u_full;
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
-  return cosmo->a_factor_internal_energy * p->u;
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
+  return p->u * cosmo->a_factor_internal_energy;
 }
 
 /**
@@ -80,24 +112,57 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
 }
 
 /**
- * @brief Returns the comoving entropy of a particle
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return gas_entropy_from_internal_energy(p->rho, p->u);
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
 }
 
 /**
- * @brief Returns the physical entropy of a particle
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -201,12 +266,27 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return p->force.u_dt;
 }
 
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->force.u_dt * cosmo->a_factor_internal_energy;
+}
+
 /**
  * @brief Returns the time derivative of internal energy of a particle
  *
@@ -215,12 +295,29 @@ __attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
  * @param p The particle of interest.
  * @param du_dt The new time derivative of the internal energy.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
 
   p->force.u_dt = du_dt;
 }
 
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->force.u_dt = du_dt * cosmo->a_factor_internal_energy;
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -351,16 +448,24 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
 /**
  * @brief Prepare a particle for the force calculation.
  *
- * Computes viscosity term, conduction term and smoothing length gradient terms.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
-
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
   const float fac_mu = cosmo->a_factor_mu;
 
   /* Some smoothing length multiples. */
@@ -389,6 +494,9 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
   p->force.balsara =
       normDiv_v / (normDiv_v + normRot_v + 0.0001f * fac_mu * fc * h_inv);
 
+  /* Set the AV property */
+  p->alpha = hydro_props->viscosity.alpha;
+
   /* Viscosity parameter decay time */
   /* const float tau = h / (2.f * const_viscosity_length * p->force.soundspeed);
    */
@@ -397,8 +505,10 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
   /* const float S = max(-normDiv_v, 0.f); */
 
   /* Compute the particle's viscosity parameter time derivative */
-  /* const float alpha_dot = (const_viscosity_alpha_min - p->alpha) / tau + */
-  /*                         (const_viscosity_alpha_max - p->alpha) * S; */
+  /* const float alpha_dot = (hydro_props->viscosity.alpha_max) - p->alpha) /
+   * tau + */
+  /*                         (hydro_props->viscosity.alpha_max - p->alpha) * S;
+   */
 
   /* Update particle's viscosity paramter */
   /* p->alpha += alpha_dot * (p->ti_end - p->ti_begin) * timeBase; */  // MATTHIEU
@@ -520,7 +630,7 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {}
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {}
 
 /**
  * @brief Initialises the particles for the first time
@@ -535,6 +645,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
   xp->v_full[2] = p->v[2];
@@ -564,4 +675,14 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->u = u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
 #endif /* SWIFT_DEFAULT_HYDRO_H */
diff --git a/src/hydro/Default/hydro_debug.h b/src/hydro/Default/hydro_debug.h
index 3be9c9e1760591423edbd218d19b46ddf9aad01e..68367beaee97c285057cb055c1fbdbba5c370085 100644
--- a/src/hydro/Default/hydro_debug.h
+++ b/src/hydro/Default/hydro_debug.h
@@ -25,10 +25,11 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "x=[%.3e,%.3e,%.3e], "
       "v=[%.3e,%.3e,%.3e],v_full=[%.3e,%.3e,%.3e] \n a=[%.3e,%.3e,%.3e],\n "
       "h=%.3e, "
-      "wcount=%d, m=%.3e, dh_drho=%.3e, rho=%.3e, time_bin=%d\n",
+      "wcount=%d, m=%.3e, dh_drho=%.3e, rho=%.3e, time_bin=%d wakeup=%d\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
-      p->h, (int)p->density.wcount, p->mass, p->rho_dh, p->rho, p->time_bin);
+      p->h, (int)p->density.wcount, p->mass, p->rho_dh, p->rho, p->time_bin,
+      p->wakeup);
 }
 
 #endif /* SWIFT_DEFAULT_HYDRO_DEBUG_H */
diff --git a/src/hydro/Default/hydro_iact.h b/src/hydro/Default/hydro_iact.h
index 658b4aba83085610a49bb9d2579d4f20c70d6c5b..85c586a4e921e38296453b71a2a2b9637971c28c 100644
--- a/src/hydro/Default/hydro_iact.h
+++ b/src/hydro/Default/hydro_iact.h
@@ -226,7 +226,8 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   omega_ij = min(fac_mu * dvdr, 0.f);
 
   /* Compute signal velocity */
-  v_sig = pi->force.soundspeed + pj->force.soundspeed - 2.0f * omega_ij;
+  v_sig = pi->force.soundspeed + pj->force.soundspeed -
+          const_viscosity_beta * omega_ij;
 
   /* Compute viscosity parameter */
   alpha_ij = -0.5f * (pi->alpha + pj->alpha);
@@ -335,7 +336,8 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   omega_ij = min(fac_mu * dvdr, 0.f);
 
   /* Compute signal velocity */
-  v_sig = pi->force.soundspeed + pj->force.soundspeed - 2.0f * omega_ij;
+  v_sig = pi->force.soundspeed + pj->force.soundspeed -
+          const_viscosity_beta * omega_ij;
 
   /* Compute viscosity parameter */
   alpha_ij = -0.5f * (pi->alpha + pj->alpha);
@@ -376,4 +378,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   pi->force.v_sig = max(pi->force.v_sig, v_sig);
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_DEFAULT_HYDRO_IACT_H */
diff --git a/src/hydro/Default/hydro_io.h b/src/hydro/Default/hydro_io.h
index d47c96fbf32e1ee00346888aaf2e8afabc22abc3..69919c202223fdecc197a87178e59767c02ee16e 100644
--- a/src/hydro/Default/hydro_io.h
+++ b/src/hydro/Default/hydro_io.h
@@ -55,6 +55,17 @@ INLINE static void hydro_read_particles(struct part* parts,
   list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
                                 UNIT_CONV_DENSITY, parts, rho);
 }
+INLINE static void convert_S(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  ret[0] = hydro_get_comoving_entropy(p, xp);
+}
+
+INLINE static void convert_P(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  ret[0] = hydro_get_comoving_pressure(p);
+}
 
 INLINE static void convert_part_pos(const struct engine* e,
                                     const struct part* p,
@@ -128,7 +139,7 @@ INLINE static void hydro_write_particles(const struct part* parts,
                                          struct io_props* list,
                                          int* num_fields) {
 
-  *num_fields = 8;
+  *num_fields = 10;
 
   /* List what we want to write */
   list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3,
@@ -146,7 +157,13 @@ INLINE static void hydro_write_particles(const struct part* parts,
                                  UNIT_CONV_NO_UNITS, parts, id);
   list[6] =
       io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
-  list[7] = io_make_output_field_convert_part("Potential", FLOAT, 1,
+  list[7] = io_make_output_field_convert_part("Entropy", FLOAT, 1,
+                                              UNIT_CONV_ENTROPY_PER_UNIT_MASS,
+                                              parts, xparts, convert_S);
+  list[8] = io_make_output_field_convert_part(
+      "Pressure", FLOAT, 1, UNIT_CONV_PRESSURE, parts, xparts, convert_P);
+
+  list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1,
                                               UNIT_CONV_POTENTIAL, parts,
                                               xparts, convert_part_potential);
 }
@@ -166,13 +183,6 @@ INLINE static void hydro_write_flavour(hid_t h_grpsph) {
       h_grpsph, "Viscosity Model",
       "Morris & Monaghan (1997), Rosswog, Davies, Thielemann & "
       "Piran (2000) with additional Balsara (1995) switch");
-  io_write_attribute_f(h_grpsph, "Viscosity alpha_min",
-                       const_viscosity_alpha_min);
-  io_write_attribute_f(h_grpsph, "Viscosity alpha_max",
-                       const_viscosity_alpha_max);
-  io_write_attribute_f(h_grpsph, "Viscosity beta", 2.f);
-  io_write_attribute_f(h_grpsph, "Viscosity decay length",
-                       const_viscosity_length);
 
   /* Time integration properties */
   io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt",
diff --git a/src/hydro/Default/hydro_part.h b/src/hydro/Default/hydro_part.h
index 2a18e03cb533ca860f227a31152ef2058e0dd37d..21c0269f78c85b7d11ab5e838d45614161aee013 100644
--- a/src/hydro/Default/hydro_part.h
+++ b/src/hydro/Default/hydro_part.h
@@ -21,6 +21,7 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "tracers_struct.h"
 
 /* Extra particle data not needed during the SPH loops over neighbours. */
 struct xpart {
@@ -40,6 +41,9 @@ struct xpart {
   /* Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
   float u_full;
 
   /* Old density. */
@@ -132,6 +136,9 @@ struct part {
   /* Particle time-bin */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/Gadget2/hydro.h b/src/hydro/Gadget2/hydro.h
index 4511b2d655b0e7b3293633a466c76757a0237874..9994be7dc6051b033c052d32a4ebf3d7b59f1a84 100644
--- a/src/hydro/Gadget2/hydro.h
+++ b/src/hydro/Gadget2/hydro.h
@@ -42,26 +42,59 @@
 #include "minmax.h"
 
 /**
- * @brief Returns the comoving internal energy of a particle
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return gas_internal_energy_from_entropy(p->rho, p->entropy);
+  return gas_internal_energy_from_entropy(p->rho, xp->entropy_full);
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
+  return gas_internal_energy_from_entropy(p->rho * cosmo->a3_inv,
+                                          xp->entropy_full);
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return gas_internal_energy_from_entropy(p->rho, p->entropy);
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
   return gas_internal_energy_from_entropy(p->rho * cosmo->a3_inv, p->entropy);
 }
 
@@ -79,7 +112,8 @@ __attribute__((always_inline)) INLINE static float hydro_get_comoving_pressure(
 /**
  * @brief Returns the physical pressure of a particle
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
     const struct part *restrict p, const struct cosmology *cosmo) {
@@ -88,24 +122,57 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
 }
 
 /**
- * @brief Returns the comoving entropy of a particle
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return p->entropy;
+  return xp->entropy_full;
 }
 
 /**
- * @brief Returns the physical entropy of a particle
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return xp->entropy_full;
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return p->entropy;
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -204,32 +271,82 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
 }
 
 /**
- * @brief Returns the time derivative of internal energy of a particle
+ * @brief Returns the time derivative of co-moving internal energy of a particle
  *
  * We assume a constant density.
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return gas_internal_energy_from_entropy(p->rho, p->entropy_dt);
 }
 
 /**
- * @brief Returns the time derivative of internal energy of a particle
+ * @brief Returns the time derivative of physical internal energy of a particle
  *
  * We assume a constant density.
  *
  * @param p The particle of interest.
- * @param du_dt The new time derivative of the internal energy.
+ * @param cosmo The cosmological model.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return gas_internal_energy_from_entropy(p->rho * cosmo->a3_inv,
+                                          p->entropy_dt);
+}
+
+/**
+ * @brief Sets the time derivative of the co-moving internal energy of a
+ * particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param du_dt The new time derivative of the comoving internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p,
+                                      const float du_dt) {
 
   p->entropy_dt = gas_entropy_from_internal_energy(p->rho, du_dt);
 }
 
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The time derivative of the physical internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *restrict cosmo,
+                                      const float du_dt) {
+  p->entropy_dt =
+      gas_entropy_from_internal_energy(p->rho * cosmo->a3_inv, du_dt);
+}
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  xp->entropy_full = entropy;
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -361,28 +478,41 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
 /**
  * @brief Prepare a particle for the force calculation.
  *
- * Computes viscosity term, conduction term and smoothing length gradient terms.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
 
-  const float fac_mu = cosmo->a_factor_mu;
+  const float fac_Balsara_eps = cosmo->a_factor_Balsara_eps;
 
-  /* Inverse of the physical density */
+  /* Inverse of the co-moving density */
   const float rho_inv = 1.f / p->rho;
 
+  /* Inverse of the smoothing length */
+  const float h_inv = 1.f / p->h;
+
   /* Compute the norm of the curl */
   const float curl_v = sqrtf(p->density.rot_v[0] * p->density.rot_v[0] +
                              p->density.rot_v[1] * p->density.rot_v[1] +
                              p->density.rot_v[2] * p->density.rot_v[2]);
 
-  /* Compute the norm of div v */
-  const float abs_div_v = fabsf(p->density.div_v);
+  /* Compute the norm of div v including the Hubble flow term */
+  const float div_physical_v = p->density.div_v + 3.f * cosmo->H;
+  const float abs_div_physical_v = fabsf(div_physical_v);
 
   /* Compute the pressure */
   const float pressure = gas_pressure_from_entropy(p->rho, p->entropy);
@@ -394,8 +524,11 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
   const float P_over_rho2 = pressure * rho_inv * rho_inv;
 
   /* Compute the Balsara switch */
-  const float balsara =
-      abs_div_v / (abs_div_v + curl_v + 0.0001f * fac_mu * soundspeed / p->h);
+  /* Pre-multiply in the AV factor; hydro_props are not passed to the iact
+   * functions */
+  const float balsara = hydro_props->viscosity.alpha * abs_div_physical_v /
+                        (abs_div_physical_v + curl_v +
+                         0.0001f * fac_Balsara_eps * soundspeed * h_inv);
 
   /* Compute the "grad h" term */
   const float omega_inv =
@@ -484,7 +617,14 @@ __attribute__((always_inline)) INLINE static void hydro_predict_extra(
   else
     p->rho *= expf(w2);
 
-  /* Predict the entropy */
+    /* Predict the entropy */
+#ifdef SWIFT_DEBUG_CHECKS
+  if (p->entropy + p->entropy_dt * dt_therm <= 0)
+    error(
+        "Negative entropy for particle id %llu old entropy %.5e d_entropy %.5e "
+        "entropy_dt %.5e dt therm %.5e",
+        p->id, p->entropy, p->entropy_dt * dt_therm, p->entropy_dt, dt_therm);
+#endif
   p->entropy += p->entropy_dt * dt_therm;
 
   /* Re-compute the pressure */
@@ -515,8 +655,8 @@ __attribute__((always_inline)) INLINE static void hydro_end_force(
 
   p->force.h_dt *= p->h * hydro_dimension_inv;
 
-  p->entropy_dt = 0.5f * cosmo->a2_inv *
-                  gas_entropy_from_internal_energy(p->rho, p->entropy_dt);
+  p->entropy_dt =
+      0.5f * gas_entropy_from_internal_energy(p->rho, p->entropy_dt);
 }
 
 /**
@@ -525,6 +665,9 @@ __attribute__((always_inline)) INLINE static void hydro_end_force(
  * @param p The particle to act upon
  * @param xp The particle extended data to act upon
  * @param dt_therm The time-step for this kick (for thermodynamic quantities)
+ * @param dt_grav The time-step for this kick (for gravity forces)
+ * @param dt_hydro The time-step for this kick (for hydro forces)
+ * @param dt_kick_corr The time-step for this kick (for correction of the kick)
  * @param cosmo The cosmological model.
  * @param hydro_props The constants used in the scheme
  */
@@ -540,12 +683,13 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
   xp->entropy_full += p->entropy_dt * dt_therm;
 
   /* Apply the minimal energy limit */
-  const float density = p->rho * cosmo->a3_inv;
-  const float min_energy = hydro_props->minimal_internal_energy;
-  const float min_entropy =
-      gas_entropy_from_internal_energy(density, min_energy);
-  if (xp->entropy_full < min_entropy) {
-    xp->entropy_full = min_entropy;
+  const float physical_density = p->rho * cosmo->a3_inv;
+  const float min_physical_energy = hydro_props->minimal_internal_energy;
+  const float min_physical_entropy =
+      gas_entropy_from_internal_energy(physical_density, min_physical_energy);
+  const float min_comoving_entropy = min_physical_entropy; /* A' = A */
+  if (xp->entropy_full < min_comoving_entropy) {
+    xp->entropy_full = min_comoving_entropy;
     p->entropy_dt = 0.f;
   }
 
@@ -571,17 +715,30 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  * @param p The particle to act upon.
  * @param xp The extended data.
  * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
 
-  /* We read u in the entropy field. We now get (comoving) S from (physical) u
-   * and (physical) rho. Note that comoving S == physical S */
+  /* We read u in the entropy field. We now get (comoving) A from (physical) u
+   * and (physical) rho. Note that comoving A (A') == physical A */
   xp->entropy_full =
       gas_entropy_from_internal_energy(p->rho * cosmo->a3_inv, p->entropy);
   p->entropy = xp->entropy_full;
 
+  /* Apply the minimal energy limit */
+  const float physical_density = p->rho * cosmo->a3_inv;
+  const float min_physical_energy = hydro_props->minimal_internal_energy;
+  const float min_physical_entropy =
+      gas_entropy_from_internal_energy(physical_density, min_physical_energy);
+  const float min_comoving_entropy = min_physical_entropy; /* A' = A */
+  if (xp->entropy_full < min_comoving_entropy) {
+    xp->entropy_full = min_comoving_entropy;
+    p->entropy = min_comoving_entropy;
+    p->entropy_dt = 0.f;
+  }
+
   /* Compute the pressure */
   const float pressure = gas_pressure_from_entropy(p->rho, p->entropy);
 
@@ -609,6 +766,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
   xp->v_full[2] = p->v[2];
@@ -638,4 +796,14 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->entropy = u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
 #endif /* SWIFT_GADGET2_HYDRO_H */
diff --git a/src/hydro/Gadget2/hydro_debug.h b/src/hydro/Gadget2/hydro_debug.h
index d0642a03a4c4eecb2da80fdae473948e460c5e31..aeb43ee5d68930debfa867dc856465ac9d22902a 100644
--- a/src/hydro/Gadget2/hydro_debug.h
+++ b/src/hydro/Gadget2/hydro_debug.h
@@ -27,14 +27,14 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "h=%.3e, wcount=%.3f, wcount_dh=%.3e, m=%.3e, dh_drho=%.3e, rho=%.3e, "
       "P=%.3e, P_over_rho2=%.3e, S=%.3e, dS/dt=%.3e, c=%.3e\n"
       "divV=%.3e, rotV=[%.3e,%.3e,%.3e], balsara=%.3e \n "
-      "v_sig=%e dh/dt=%.3e time_bin=%d\n",
+      "v_sig=%e dh/dt=%.3e time_bin=%d wakeup=%d\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
       p->h, p->density.wcount, p->density.wcount_dh, p->mass, p->density.rho_dh,
       p->rho, hydro_get_comoving_pressure(p), p->force.P_over_rho2, p->entropy,
       p->entropy_dt, p->force.soundspeed, p->density.div_v, p->density.rot_v[0],
       p->density.rot_v[1], p->density.rot_v[2], p->force.balsara,
-      p->force.v_sig, p->force.h_dt, p->time_bin);
+      p->force.v_sig, p->force.h_dt, p->time_bin, p->wakeup);
 }
 
 #endif /* SWIFT_GADGET2_HYDRO_DEBUG_H */
diff --git a/src/hydro/Gadget2/hydro_iact.h b/src/hydro/Gadget2/hydro_iact.h
index b2af8909bed1780586a5130370222c9b8157d724..1ded85acfb7486b1286ddfbbfa698da0f4344e7d 100644
--- a/src/hydro/Gadget2/hydro_iact.h
+++ b/src/hydro/Gadget2/hydro_iact.h
@@ -55,6 +55,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_density(
   float wj, wj_dx;
   float dv[3], curlvr[3];
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Get the masses. */
   const float mi = pi->mass;
   const float mj = pj->mass;
@@ -145,6 +152,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_density(
   float wi, wi_dx;
   float dv[3], curlvr[3];
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Get the masses. */
   const float mj = pj->mass;
 
@@ -279,7 +293,7 @@ runner_iact_nonsym_2_vec_density(float *R2, float *Dx, float *Dy, float *Dz,
                                  vector *wcountSum, vector *wcount_dhSum,
                                  vector *div_vSum, vector *curlvxSum,
                                  vector *curlvySum, vector *curlvzSum,
-                                 mask_t mask, mask_t mask2, short mask_cond) {
+                                 mask_t mask, mask_t mask2, int mask_cond) {
 
   vector r, ri, ui, wi, wi_dx;
   vector dvx, dvy, dvz;
@@ -436,6 +450,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
 
   float wi, wj, wi_dx, wj_dx;
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Cosmological factors entering the EoMs */
   const float fac_mu = pow_three_gamma_minus_five_over_two(a);
   const float a2_Hubble = a * a * H;
@@ -479,23 +500,25 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Add Hubble flow */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
   const float balsara_j = pj->force.balsara;
 
   /* Are the particles moving towards each others ? */
-  const float omega_ij = (dvdr < 0.f) ? dvdr : 0.f;
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Signal velocity */
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Now construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Now, convolve with the kernel */
   const float visc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -523,8 +546,8 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   pj->force.v_sig = max(pj->force.v_sig, v_sig);
 
   /* Change in entropy */
-  pi->entropy_dt += mj * visc_term * dvdr;
-  pj->entropy_dt += mi * visc_term * dvdr;
+  pi->entropy_dt += mj * visc_term * dvdr_Hubble;
+  pj->entropy_dt += mi * visc_term * dvdr_Hubble;
 
 #ifdef DEBUG_INTERACTIONS_SPH
   /* Update ngb counters */
@@ -556,6 +579,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
 
   float wi, wj, wi_dx, wj_dx;
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Cosmological factors entering the EoMs */
   const float fac_mu = pow_three_gamma_minus_five_over_two(a);
   const float a2_Hubble = a * a * H;
@@ -599,23 +629,25 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Add Hubble flow */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
   const float balsara_j = pj->force.balsara;
 
   /* Are the particles moving towards each others ? */
-  const float omega_ij = (dvdr < 0.f) ? dvdr : 0.f;
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Signal velocity */
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Now construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Now, convolve with the kernel */
   const float visc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -637,7 +669,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   pi->force.v_sig = max(pi->force.v_sig, v_sig);
 
   /* Change in entropy */
-  pi->entropy_dt += mj * visc_term * dvdr;
+  pi->entropy_dt += mj * visc_term * dvdr_Hubble;
 
 #ifdef DEBUG_INTERACTIONS_SPH
   /* Update ngb counters */
@@ -648,8 +680,6 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
 }
 
 #ifdef WITH_VECTORIZATION
-static const vector const_viscosity_alpha_fac =
-    FILL_VEC(-0.25f * const_viscosity_alpha);
 
 /**
  * @brief Force interaction computed using 1 vector
@@ -671,7 +701,7 @@ runner_iact_nonsym_1_vec_force(
   vector dvx, dvy, dvz;
   vector xi, xj;
   vector hid_inv, hjd_inv;
-  vector wi_dx, wj_dx, wi_dr, wj_dr, dvdr;
+  vector wi_dx, wj_dx, wi_dr, wj_dr, dvdr, dvdr_Hubble;
   vector piax, piay, piaz;
   vector pih_dt;
   vector v_sig;
@@ -723,25 +753,26 @@ runner_iact_nonsym_1_vec_force(
   dvz.v = vec_sub(viz.v, vjz.v);
 
   /* Compute dv dot r. */
-  dvdr.v =
-      vec_fma(dvx.v, dx->v,
-              vec_fma(dvy.v, dy->v,
-                      vec_fma(dvz.v, dz->v, vec_mul(v_a2_Hubble.v, r2->v))));
+  dvdr.v = vec_fma(dvx.v, dx->v, vec_fma(dvy.v, dy->v, vec_mul(dvz.v, dz->v)));
+
+  /* Add Hubble flow */
+  dvdr_Hubble.v = vec_add(dvdr.v, vec_mul(v_a2_Hubble.v, r2->v));
 
   /* Compute the relative velocity. (This is 0 if the particles move away from
    * each other and negative otherwise) */
-  omega_ij.v = vec_fmin(dvdr.v, vec_setzero());
+  omega_ij.v = vec_fmin(dvdr_Hubble.v, vec_setzero());
   mu_ij.v = vec_mul(v_fac_mu.v,
                     vec_mul(ri.v, omega_ij.v)); /* This is 0 or negative */
 
   /* Compute signal velocity */
-  v_sig.v = vec_fnma(vec_set1(3.f), mu_ij.v, vec_add(ci.v, cj.v));
+  v_sig.v =
+      vec_fnma(vec_set1(const_viscosity_beta), mu_ij.v, vec_add(ci.v, cj.v));
 
   /* Now construct the full viscosity term */
   rho_ij.v = vec_mul(vec_set1(0.5f), vec_add(pirho.v, pjrho.v));
-  visc.v = vec_div(vec_mul(const_viscosity_alpha_fac.v,
-                           vec_mul(v_sig.v, vec_mul(mu_ij.v, balsara.v))),
-                   rho_ij.v);
+  visc.v = vec_div(
+      vec_mul(vec_set1(-0.25f), vec_mul(v_sig.v, vec_mul(mu_ij.v, balsara.v))),
+      rho_ij.v);
 
   /* Now, convolve with the kernel */
   visc_term.v =
@@ -766,7 +797,7 @@ runner_iact_nonsym_1_vec_force(
       vec_div(vec_mul(mj.v, vec_mul(dvdr.v, vec_mul(ri.v, wi_dr.v))), pjrho.v);
 
   /* Change in entropy */
-  entropy_dt.v = vec_mul(mj.v, vec_mul(visc_term.v, dvdr.v));
+  entropy_dt.v = vec_mul(mj.v, vec_mul(visc_term.v, dvdr_Hubble.v));
 
   /* Store the forces back on the particles. */
   a_hydro_xSum->v = vec_mask_sub(a_hydro_xSum->v, piax.v, mask);
@@ -806,7 +837,7 @@ runner_iact_nonsym_2_vec_force(
   vector dvx, dvy, dvz;
   vector ui, uj;
   vector hid_inv, hjd_inv;
-  vector wi_dx, wj_dx, wi_dr, wj_dr, dvdr;
+  vector wi_dx, wj_dx, wi_dr, wj_dr, dvdr, dvdr_Hubble;
   vector piax, piay, piaz;
   vector pih_dt;
   vector v_sig;
@@ -817,7 +848,7 @@ runner_iact_nonsym_2_vec_force(
   vector dvx_2, dvy_2, dvz_2;
   vector ui_2, uj_2;
   vector hjd_inv_2;
-  vector wi_dx_2, wj_dx_2, wi_dr_2, wj_dr_2, dvdr_2;
+  vector wi_dx_2, wj_dx_2, wi_dr_2, wj_dr_2, dvdr_2, dvdr_Hubble_2;
   vector piax_2, piay_2, piaz_2;
   vector pih_dt_2;
   vector v_sig_2;
@@ -903,36 +934,38 @@ runner_iact_nonsym_2_vec_force(
   dvz_2.v = vec_sub(viz.v, vjz_2.v);
 
   /* Compute dv dot r. */
-  dvdr.v = vec_fma(
-      dvx.v, dx.v,
-      vec_fma(dvy.v, dy.v, vec_fma(dvz.v, dz.v, vec_mul(v_a2_Hubble.v, r2.v))));
-  dvdr_2.v = vec_fma(
-      dvx_2.v, dx_2.v,
-      vec_fma(dvy_2.v, dy_2.v,
-              vec_fma(dvz_2.v, dz_2.v, vec_mul(v_a2_Hubble.v, r2_2.v))));
+  dvdr.v = vec_fma(dvx.v, dx.v, vec_fma(dvy.v, dy.v, vec_mul(dvz.v, dz.v)));
+  dvdr_2.v = vec_fma(dvx_2.v, dx_2.v,
+                     vec_fma(dvy_2.v, dy_2.v, vec_mul(dvz_2.v, dz_2.v)));
+
+  /* Add the Hubble flow */
+  dvdr_Hubble.v = vec_add(dvdr.v, vec_mul(v_a2_Hubble.v, r2.v));
+  dvdr_Hubble_2.v = vec_add(dvdr_2.v, vec_mul(v_a2_Hubble.v, r2_2.v));
 
   /* Compute the relative velocity. (This is 0 if the particles move away from
    * each other and negative otherwise) */
-  omega_ij.v = vec_fmin(dvdr.v, vec_setzero());
-  omega_ij_2.v = vec_fmin(dvdr_2.v, vec_setzero());
+  omega_ij.v = vec_fmin(dvdr_Hubble.v, vec_setzero());
+  omega_ij_2.v = vec_fmin(dvdr_Hubble_2.v, vec_setzero());
   mu_ij.v = vec_mul(v_fac_mu.v,
                     vec_mul(ri.v, omega_ij.v)); /* This is 0 or negative */
   mu_ij_2.v = vec_mul(
       v_fac_mu.v, vec_mul(ri_2.v, omega_ij_2.v)); /* This is 0 or negative */
 
   /* Compute signal velocity */
-  v_sig.v = vec_fnma(vec_set1(3.f), mu_ij.v, vec_add(ci.v, cj.v));
-  v_sig_2.v = vec_fnma(vec_set1(3.f), mu_ij_2.v, vec_add(ci.v, cj_2.v));
+  v_sig.v =
+      vec_fnma(vec_set1(const_viscosity_beta), mu_ij.v, vec_add(ci.v, cj.v));
+  v_sig_2.v = vec_fnma(vec_set1(const_viscosity_beta), mu_ij_2.v,
+                       vec_add(ci.v, cj_2.v));
 
   /* Now construct the full viscosity term */
   rho_ij.v = vec_mul(vec_set1(0.5f), vec_add(pirho.v, pjrho.v));
   rho_ij_2.v = vec_mul(vec_set1(0.5f), vec_add(pirho.v, pjrho_2.v));
 
-  visc.v = vec_div(vec_mul(const_viscosity_alpha_fac.v,
-                           vec_mul(v_sig.v, vec_mul(mu_ij.v, balsara.v))),
-                   rho_ij.v);
+  visc.v = vec_div(
+      vec_mul(vec_set1(-0.25f), vec_mul(v_sig.v, vec_mul(mu_ij.v, balsara.v))),
+      rho_ij.v);
   visc_2.v =
-      vec_div(vec_mul(const_viscosity_alpha_fac.v,
+      vec_div(vec_mul(vec_set1(-0.25f),
                       vec_mul(v_sig_2.v, vec_mul(mu_ij_2.v, balsara_2.v))),
               rho_ij_2.v);
 
@@ -976,8 +1009,8 @@ runner_iact_nonsym_2_vec_force(
               pjrho_2.v);
 
   /* Change in entropy */
-  entropy_dt.v = vec_mul(mj.v, vec_mul(visc_term.v, dvdr.v));
-  entropy_dt_2.v = vec_mul(mj_2.v, vec_mul(visc_term_2.v, dvdr_2.v));
+  entropy_dt.v = vec_mul(mj.v, vec_mul(visc_term.v, dvdr_Hubble.v));
+  entropy_dt_2.v = vec_mul(mj_2.v, vec_mul(visc_term_2.v, dvdr_Hubble_2.v));
 
   /* Store the forces back on the particles. */
   if (mask_cond) {
@@ -1018,4 +1051,34 @@ runner_iact_nonsym_2_vec_force(
 
 #endif
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+
+    // MATTHIEU
+    // if (pj->wakeup == time_bin_not_awake)
+    // pj->wakeup = time_bin_awake;
+    // else if (pj->wakeup > 0)
+    // pj->wakeup = -pj->wakeup;
+  }
+}
+
 #endif /* SWIFT_GADGET2_HYDRO_IACT_H */
diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h
index 3f2af41dc7f0cc8f60992a15a0f09f3c90f764fe..ec7d34f7ad8697f1d639ea4951011ddb06ec8833 100644
--- a/src/hydro/Gadget2/hydro_io.h
+++ b/src/hydro/Gadget2/hydro_io.h
@@ -59,7 +59,7 @@ INLINE static void hydro_read_particles(struct part* parts,
 INLINE static void convert_part_u(const struct engine* e, const struct part* p,
                                   const struct xpart* xp, float* ret) {
 
-  ret[0] = hydro_get_comoving_internal_energy(p);
+  ret[0] = hydro_get_comoving_internal_energy(p, xp);
 }
 
 INLINE static void convert_part_P(const struct engine* e, const struct part* p,
@@ -132,6 +132,7 @@ INLINE static void convert_part_potential(const struct engine* e,
  * @brief Specifies which particle fields to write to a dataset
  *
  * @param parts The particle array.
+ * @param xparts The extended particle data array.
  * @param list The list of i/o properties to write.
  * @param num_fields The number of i/o fields to write.
  */
@@ -199,8 +200,6 @@ INLINE static void hydro_write_flavour(hid_t h_grpsph) {
   io_write_attribute_s(
       h_grpsph, "Viscosity Model",
       "as in Springel (2005), i.e. Monaghan (1992) with Balsara (1995) switch");
-  io_write_attribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha);
-  io_write_attribute_f(h_grpsph, "Viscosity beta", 3.f);
 }
 
 /**
diff --git a/src/hydro/Gadget2/hydro_part.h b/src/hydro/Gadget2/hydro_part.h
index 90f73571701b37b3377601655330d8d25f862a05..3001700395b8584981c0087c8ff402a953461213 100644
--- a/src/hydro/Gadget2/hydro_part.h
+++ b/src/hydro/Gadget2/hydro_part.h
@@ -33,6 +33,9 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "logger.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /* Extra particle data not needed during the SPH loops over neighbours. */
 struct xpart {
@@ -55,6 +58,17 @@ struct xpart {
   /* Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /* Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
+#ifdef WITH_LOGGER
+  /* Additional data for the particle logger */
+  struct logger_part_data logger_data;
+#endif
+
 } SWIFT_STRUCT_ALIGN;
 
 /* Data of a single particle. */
@@ -140,6 +154,9 @@ struct part {
   /* Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/GizmoMFM/hydro.h b/src/hydro/GizmoMFM/hydro.h
index 41c870da0cc4630896324b5fdab4b6ca2d4362bc..a4a54e7b551cc643bffedb8661f4fe269d348dc4 100644
--- a/src/hydro/GizmoMFM/hydro.h
+++ b/src/hydro/GizmoMFM/hydro.h
@@ -137,6 +137,9 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
                                  p->conserved.momentum[2] * p->v[2]);
 #endif
 
+  p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
+
   /* initialize the particle velocity based on the primitive fluid velocity */
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
@@ -441,17 +444,24 @@ __attribute__((always_inline)) INLINE static void hydro_end_gradient(
 /**
  * @brief Prepare a particle for the force calculation.
  *
- * This function is called in the extra_ghost task to convert some quantities
- * coming from the gradient loop over neighbours into quantities ready to be
- * used in the force loop over neighbours.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part* restrict p, struct xpart* restrict xp,
-    const struct cosmology* cosmo) {
+    const struct cosmology* cosmo, const struct hydro_props* hydro_props,
+    const float dt_alpha) {
 
   /* Initialise values that are used in the force loop */
   p->flux.momentum[0] = 0.0f;
@@ -501,7 +511,8 @@ __attribute__((always_inline)) INLINE static void hydro_reset_predicted_values(
  * @param p The particle to act upon.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
-    struct part* p, struct xpart* xp, const struct cosmology* cosmo) {
+    struct part* p, struct xpart* xp, const struct cosmology* cosmo,
+    const struct hydro_props* hydro_props) {
 
   p->conserved.energy /= cosmo->a_factor_internal_energy;
 }
@@ -714,16 +725,45 @@ hydro_get_comoving_internal_energy(const struct part* restrict p) {
  * @brief Returns the physical internal energy of a particle
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part* restrict p,
+                                   const struct xpart* restrict xp,
                                    const struct cosmology* cosmo) {
 
   return cosmo->a_factor_internal_energy *
          hydro_get_comoving_internal_energy(p);
 }
 
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part* restrict p) {
+
+  return hydro_get_comoving_internal_energy(p);
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part* restrict p,
+                                           const struct cosmology* cosmo) {
+
+  return hydro_get_comoving_internal_energy(p) *
+         cosmo->a_factor_internal_energy;
+}
+
 /**
  * @brief Returns the comoving entropy of a particle
  *
@@ -743,10 +783,27 @@ __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
  * @brief Returns the physical internal energy of a particle
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part* restrict p, const struct cosmology* cosmo) {
+    const struct part* restrict p, const struct xpart* restrict xp,
+    const struct cosmology* cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return hydro_get_comoving_entropy(p);
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part* restrict p,
+                                   const struct cosmology* cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -856,6 +913,80 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
   v[2] += xp->a_grav[2] * dt_kick_grav;
 }
 
+/**
+ * @brief Returns the time derivative of co-moving internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part* restrict p) {
+
+  error("Needs implementing");
+  return 0.f;
+}
+
+/**
+ * @brief Returns the time derivative of physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part* restrict p,
+                                      const struct cosmology* cosmo) {
+  error("Needs implementing");
+  return 0.f;
+}
+
+/**
+ * @brief Sets the time derivative of the co-moving internal energy of a
+ * particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param du_dt The new time derivative of the comoving internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part* restrict p,
+                                      const float du_dt) {
+  error("Needs implementing");
+}
+
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The time derivative of the physical internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part* restrict p,
+                                      const struct cosmology* restrict cosmo,
+                                      const float du_dt) {
+  error("Needs implementing");
+}
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part* p, struct xpart* xp, const struct cosmology* cosmo,
+    const float entropy) {
+
+  error("Needs implementing");
+}
+
 /**
  * @brief Returns the comoving density of a particle
  *
@@ -954,4 +1085,14 @@ hydro_set_init_internal_energy(struct part* p, float u_init) {
   p->P = hydro_gamma_minus_one * p->rho * u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part* p, const struct xpart* xp) {}
+
 #endif /* SWIFT_GIZMO_MFM_HYDRO_H */
diff --git a/src/hydro/GizmoMFM/hydro_debug.h b/src/hydro/GizmoMFM/hydro_debug.h
index e8b0914bd3cf6a99210399c6fc654e526319009f..e3c9f793aec92c7bfa2527143e6ad771c3897a09 100644
--- a/src/hydro/GizmoMFM/hydro_debug.h
+++ b/src/hydro/GizmoMFM/hydro_debug.h
@@ -27,6 +27,7 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "a=[%.3e,%.3e,%.3e], "
       "h=%.3e, "
       "time_bin=%d, "
+      "wakeup=%d, "
       "rho=%.3e, "
       "P=%.3e, "
       "gradients={"
@@ -51,7 +52,7 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "wcount_dh=%.3e, "
       "wcount=%.3e}\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], p->a_hydro[0],
-      p->a_hydro[1], p->a_hydro[2], p->h, p->time_bin, p->rho, p->P,
+      p->a_hydro[1], p->a_hydro[2], p->h, p->time_bin, p->wakeup, p->rho, p->P,
       p->gradients.rho[0], p->gradients.rho[1], p->gradients.rho[2],
       p->gradients.v[0][0], p->gradients.v[0][1], p->gradients.v[0][2],
       p->gradients.v[1][0], p->gradients.v[1][1], p->gradients.v[1][2],
diff --git a/src/hydro/GizmoMFM/hydro_iact.h b/src/hydro/GizmoMFM/hydro_iact.h
index 5bed20d7f894a76d5fe3642c7438dc03195e43d6..09d4c7c70ee2bae8a31d10cb4a568c4627c7b3cd 100644
--- a/src/hydro/GizmoMFM/hydro_iact.h
+++ b/src/hydro/GizmoMFM/hydro_iact.h
@@ -267,8 +267,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_fluxes_common(
   const float dvdotdx = min(dvdr, 0.0f);
 
   /* Get the signal velocity */
-  /* the magical factor 3 also appears in Gadget2 */
-  vmax -= 3.0f * dvdotdx * r_inv;
+  vmax -= const_viscosity_beta * dvdotdx * r_inv;
 
   /* Store the signal velocity */
   pi->timestepvars.vmax = max(pi->timestepvars.vmax, vmax);
@@ -487,4 +486,29 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   runner_iact_fluxes_common(r2, dx, hi, hj, pi, pj, 0, a, H);
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->timestepvars.vmax >
+      const_limiter_max_v_sig_ratio * pj->timestepvars.vmax) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_GIZMO_MFM_HYDRO_IACT_H */
diff --git a/src/hydro/GizmoMFM/hydro_part.h b/src/hydro/GizmoMFM/hydro_part.h
index 0055d7d86a35746a8ba90015b3a6986f8ddb5f9f..8097b1b2560f24f78636bbb855700054524fe0bb 100644
--- a/src/hydro/GizmoMFM/hydro_part.h
+++ b/src/hydro/GizmoMFM/hydro_part.h
@@ -21,6 +21,8 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /* Extra particle data not needed during the computation. */
 struct xpart {
@@ -40,6 +42,12 @@ struct xpart {
   /* Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /* Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /* Data of a single particle. */
@@ -187,6 +195,9 @@ struct part {
   /* Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/GizmoMFV/hydro.h b/src/hydro/GizmoMFV/hydro.h
index c1a62e3c16b98e98b881f3fe4ddcd539cf842c9d..974f57ed68bbc409697111c52c40f36d4a5cb9b1 100644
--- a/src/hydro/GizmoMFV/hydro.h
+++ b/src/hydro/GizmoMFV/hydro.h
@@ -121,6 +121,9 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
 
   const float mass = p->conserved.mass;
 
+  p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
+
   p->primitives.v[0] = p->v[0];
   p->primitives.v[1] = p->v[1];
   p->primitives.v[2] = p->v[2];
@@ -466,17 +469,24 @@ __attribute__((always_inline)) INLINE static void hydro_end_gradient(
 /**
  * @brief Prepare a particle for the force calculation.
  *
- * This function is called in the extra_ghost task to convert some quantities
- * coming from the gradient loop over neighbours into quantities ready to be
- * used in the force loop over neighbours.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part* restrict p, struct xpart* restrict xp,
-    const struct cosmology* cosmo) {
+    const struct cosmology* cosmo, const struct hydro_props* hydro_props,
+    const float dt_alpha) {
 
   /* Initialise values that are used in the force loop */
   p->gravity.mflux[0] = 0.0f;
@@ -543,7 +553,8 @@ __attribute__((always_inline)) INLINE static void hydro_reset_predicted_values(
  * @param p The particle to act upon.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
-    struct part* p, struct xpart* xp, const struct cosmology* cosmo) {
+    struct part* p, struct xpart* xp, const struct cosmology* cosmo,
+    const struct hydro_props* hydro_props) {
 
   p->conserved.energy /= cosmo->a_factor_internal_energy;
 }
@@ -800,16 +811,31 @@ hydro_get_comoving_internal_energy(const struct part* restrict p) {
  * @brief Returns the physical internal energy of a particle
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part* restrict p,
+                                   const struct xpart* restrict xp,
                                    const struct cosmology* cosmo) {
 
   return cosmo->a_factor_internal_energy *
          hydro_get_comoving_internal_energy(p);
 }
 
+/**
+ * @brief Returns the physical internal energy of a particle
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part* restrict p,
+                                           const struct cosmology* cosmo) {
+
+  return hydro_get_physical_internal_energy(p, /*xp=*/NULL, cosmo);
+}
+
 /**
  * @brief Returns the comoving entropy of a particle
  *
@@ -829,10 +855,27 @@ __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
  * @brief Returns the physical internal energy of a particle
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part* restrict p, const struct cosmology* cosmo) {
+    const struct part* restrict p, const struct xpart* restrict xp,
+    const struct cosmology* cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return hydro_get_comoving_entropy(p);
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part* restrict p,
+                                   const struct cosmology* cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -944,6 +987,80 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
   v[2] += xp->a_grav[2] * dt_kick_grav;
 }
 
+/**
+ * @brief Returns the time derivative of co-moving internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part* restrict p) {
+
+  error("Needs implementing");
+  return 0.f;
+}
+
+/**
+ * @brief Returns the time derivative of physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part* restrict p,
+                                      const struct cosmology* cosmo) {
+  error("Needs implementing");
+  return 0.f;
+}
+
+/**
+ * @brief Sets the time derivative of the co-moving internal energy of a
+ * particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param du_dt The new time derivative of the comoving internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part* restrict p,
+                                      const float du_dt) {
+  error("Needs implementing");
+}
+
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The time derivative of the physical internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part* restrict p,
+                                      const struct cosmology* restrict cosmo,
+                                      const float du_dt) {
+  error("Needs implementing");
+}
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part* p, struct xpart* xp, const struct cosmology* cosmo,
+    const float entropy) {
+
+  error("Needs implementing");
+}
+
 /**
  * @brief Returns the comoving density of a particle
  *
@@ -1042,4 +1159,14 @@ hydro_set_init_internal_energy(struct part* p, float u_init) {
   p->primitives.P = hydro_gamma_minus_one * p->primitives.rho * u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part* p, const struct xpart* xp) {}
+
 #endif /* SWIFT_GIZMO_MFV_HYDRO_H */
diff --git a/src/hydro/GizmoMFV/hydro_debug.h b/src/hydro/GizmoMFV/hydro_debug.h
index 8af3f824666529efad833c3bd520ace779718449..181bd6f82d547803c7303bd19be11cf66dc3a8a8 100644
--- a/src/hydro/GizmoMFV/hydro_debug.h
+++ b/src/hydro/GizmoMFV/hydro_debug.h
@@ -27,6 +27,7 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "a=[%.3e,%.3e,%.3e], "
       "h=%.3e, "
       "time_bin=%d, "
+      "wakeup=%d, "
       "primitives={"
       "v=[%.3e,%.3e,%.3e], "
       "rho=%.3e, "
@@ -53,9 +54,9 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "wcount_dh=%.3e, "
       "wcount=%.3e}\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], p->a_hydro[0],
-      p->a_hydro[1], p->a_hydro[2], p->h, p->time_bin, p->primitives.v[0],
-      p->primitives.v[1], p->primitives.v[2], p->primitives.rho,
-      p->primitives.P, p->primitives.gradients.rho[0],
+      p->a_hydro[1], p->a_hydro[2], p->h, p->time_bin, p->wakeup,
+      p->primitives.v[0], p->primitives.v[1], p->primitives.v[2],
+      p->primitives.rho, p->primitives.P, p->primitives.gradients.rho[0],
       p->primitives.gradients.rho[1], p->primitives.gradients.rho[2],
       p->primitives.gradients.v[0][0], p->primitives.gradients.v[0][1],
       p->primitives.gradients.v[0][2], p->primitives.gradients.v[1][0],
diff --git a/src/hydro/GizmoMFV/hydro_iact.h b/src/hydro/GizmoMFV/hydro_iact.h
index c766ce3cc9048f8da8b3438c3c27e6998dd5df7e..d882549f8c55018419a2e1730d2ac099bbe1f5ee 100644
--- a/src/hydro/GizmoMFV/hydro_iact.h
+++ b/src/hydro/GizmoMFV/hydro_iact.h
@@ -271,8 +271,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_fluxes_common(
   dvdotdx = min(dvdotdx, 0.f);
 
   /* Get the signal velocity */
-  /* the magical factor 3 also appears in Gadget2 */
-  vmax -= 3.f * dvdotdx * r_inv;
+  vmax -= const_viscosity_beta * dvdotdx * r_inv;
 
   /* Store the signal velocity */
   pi->timestepvars.vmax = max(pi->timestepvars.vmax, vmax);
@@ -502,4 +501,29 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   runner_iact_fluxes_common(r2, dx, hi, hj, pi, pj, 0, a, H);
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->timestepvars.vmax >
+      const_limiter_max_v_sig_ratio * pj->timestepvars.vmax) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_GIZMO_MFV_HYDRO_IACT_H */
diff --git a/src/hydro/GizmoMFV/hydro_part.h b/src/hydro/GizmoMFV/hydro_part.h
index 6248ddb11daf39a65be9a57fe51e40386ecda50b..af83dbd92590a30aa401e1cc5626554633058b1f 100644
--- a/src/hydro/GizmoMFV/hydro_part.h
+++ b/src/hydro/GizmoMFV/hydro_part.h
@@ -21,6 +21,8 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /* Extra particle data not needed during the computation. */
 struct xpart {
@@ -40,6 +42,12 @@ struct xpart {
   /* Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /* Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /* Data of a single particle. */
@@ -198,6 +206,9 @@ struct part {
   /* Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/Minimal/hydro.h b/src/hydro/Minimal/hydro.h
index 01abe55e7267ca04a7f3e9740c10c681f86f29ea..a5808468300da234a91a86feb897a9398e14db90 100644
--- a/src/hydro/Minimal/hydro.h
+++ b/src/hydro/Minimal/hydro.h
@@ -44,35 +44,58 @@
 #include "minmax.h"
 
 /**
- * @brief Returns the comoving internal energy of a particle
- *
- * For implementations where the main thermodynamic variable
- * is not internal energy, this function computes the internal
- * energy from the thermodynamic variable.
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return p->u;
+  return xp->u_full;
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
- *
- * For implementations where the main thermodynamic variable
- * is not internal energy, this function computes the internal
- * energy from the thermodynamic variable and converts it to
- * physical coordinates.
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
   return p->u * cosmo->a_factor_internal_energy;
 }
 
@@ -106,33 +129,57 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
 }
 
 /**
- * @brief Returns the comoving entropy of a particle
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
  *
- * For implementations where the main thermodynamic variable
- * is not entropy, this function computes the entropy from
- * the thermodynamic variable.
- *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return gas_entropy_from_internal_energy(p->rho, p->u);
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
 }
 
 /**
- * @brief Returns the physical entropy of a particle
- *
- * For implementations where the main thermodynamic variable
- * is not entropy, this function computes the entropy from
- * the thermodynamic variable and converts it to
- * physical coordinates.
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -231,14 +278,14 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
 }
 
 /**
- * @brief Returns the time derivative of internal energy of a particle
+ * @brief Returns the time derivative of co-moving internal energy of a particle
  *
  * We assume a constant density.
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return p->u_dt;
 }
@@ -248,14 +295,65 @@ __attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
  *
  * We assume a constant density.
  *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->u_dt * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the time derivative of the co-moving internal energy of a
+ * particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
  * @param p The particle of interest.
  * @param du_dt The new time derivative of the internal energy.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
 
   p->u_dt = du_dt;
 }
+
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->u_dt = du_dt / cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  const float comoving_entropy = entropy;
+  xp->u_full = gas_internal_energy_from_entropy(p->rho, comoving_entropy);
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -308,6 +406,10 @@ __attribute__((always_inline)) INLINE static void hydro_init_part(
   p->density.wcount_dh = 0.f;
   p->rho = 0.f;
   p->density.rho_dh = 0.f;
+  p->density.div_v = 0.f;
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
 }
 
 /**
@@ -343,6 +445,17 @@ __attribute__((always_inline)) INLINE static void hydro_end_density(
   p->density.rho_dh *= h_inv_dim_plus_one;
   p->density.wcount *= h_inv_dim;
   p->density.wcount_dh *= h_inv_dim_plus_one;
+
+  const float rho_inv = 1.f / p->rho;
+  const float a_inv2 = cosmo->a2_inv;
+
+  /* Finish calculation of the (physical) velocity curl components */
+  p->density.rot_v[0] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[1] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[2] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+
+  /* Finish calculation of the (physical) velocity divergence */
+  p->density.div_v *= h_inv_dim_plus_one * a_inv2 * rho_inv;
 }
 
 /**
@@ -370,6 +483,10 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
   p->density.wcount = kernel_root * h_inv_dim;
   p->density.rho_dh = 0.f;
   p->density.wcount_dh = 0.f;
+  p->density.div_v = 0.f;
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
 }
 
 /**
@@ -385,10 +502,28 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
+
+  const float fac_Balsara_eps = cosmo->a_factor_Balsara_eps;
+
+  /* Inverse of the smoothing length */
+  const float h_inv = 1.f / p->h;
+
+  /* Compute the norm of the curl */
+  const float curl_v = sqrtf(p->density.rot_v[0] * p->density.rot_v[0] +
+                             p->density.rot_v[1] * p->density.rot_v[1] +
+                             p->density.rot_v[2] * p->density.rot_v[2]);
+
+  /* Compute the norm of div v including the Hubble flow term */
+  const float div_physical_v = p->density.div_v + 3.f * cosmo->H;
+  const float abs_div_physical_v = fabsf(div_physical_v);
 
   /* Compute the pressure */
   const float pressure = gas_pressure_from_internal_energy(p->rho, p->u);
@@ -401,10 +536,18 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
   const float grad_h_term =
       1.f / (1.f + hydro_dimension_inv * p->h * p->density.rho_dh * rho_inv);
 
+  /* Compute the Balsara switch */
+  /* Pre-multiply in the AV factor; hydro_props are not passed to the iact
+   * functions */
+  const float balsara = hydro_props->viscosity.alpha * abs_div_physical_v /
+                        (abs_div_physical_v + curl_v +
+                         0.0001f * fac_Balsara_eps * soundspeed * h_inv);
+
   /* Update variables. */
   p->force.f = grad_h_term;
   p->force.pressure = pressure;
   p->force.soundspeed = soundspeed;
+  p->force.balsara = balsara;
 }
 
 /**
@@ -522,6 +665,9 @@ __attribute__((always_inline)) INLINE static void hydro_end_force(
  * @param p The particle to act upon.
  * @param xp The particle extended data to act upon.
  * @param dt_therm The time-step for this kick (for thermodynamic quantities).
+ * @param dt_grav The time-step for this kick (for gravity quantities).
+ * @param dt_hydro The time-step for this kick (for hydro quantities).
+ * @param dt_kick_corr The time-step for this kick (for gravity corrections).
  * @param cosmo The cosmological model.
  * @param hydro_props The constants used in the scheme
  */
@@ -537,10 +683,10 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
   xp->u_full += p->u_dt * dt_therm;
 
   /* Apply the minimal energy limit */
-  const float min_energy =
-      hydro_props->minimal_internal_energy * cosmo->a_factor_internal_energy;
-  if (xp->u_full < min_energy) {
-    xp->u_full = min_energy;
+  const float min_comoving_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_comoving_energy) {
+    xp->u_full = min_comoving_energy;
     p->u_dt = 0.f;
   }
 
@@ -548,7 +694,8 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
   const float pressure = gas_pressure_from_internal_energy(p->rho, xp->u_full);
 
   /* Compute the sound speed */
-  const float soundspeed = gas_soundspeed_from_internal_energy(p->rho, p->u);
+  const float soundspeed =
+      gas_soundspeed_from_internal_energy(p->rho, xp->u_full);
 
   p->force.pressure = pressure;
   p->force.soundspeed = soundspeed;
@@ -565,10 +712,26 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  * @param p The particle to act upon
  * @param xp The extended particle to act upon
  * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Convert the physical internal energy to the comoving one. */
+  /* u' = a^(3(g-1)) u */
+  const float factor = 1.f / cosmo->a_factor_internal_energy;
+  p->u *= factor;
+  xp->u_full = p->u;
+
+  /* Apply the minimal energy limit */
+  const float min_comoving_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_comoving_energy) {
+    xp->u_full = min_comoving_energy;
+    p->u = min_comoving_energy;
+    p->u_dt = 0.f;
+  }
 
   /* Compute the pressure */
   const float pressure = gas_pressure_from_internal_energy(p->rho, p->u);
@@ -594,6 +757,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
   xp->v_full[2] = p->v[2];
@@ -623,4 +787,14 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->u = u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
 #endif /* SWIFT_MINIMAL_HYDRO_H */
diff --git a/src/hydro/Minimal/hydro_debug.h b/src/hydro/Minimal/hydro_debug.h
index 73ffc26b8acf687a5445591ddccd72ea8e8fa8ae..3fadd05f9b93e53f1855c5daa7727d272ffe0fa5 100644
--- a/src/hydro/Minimal/hydro_debug.h
+++ b/src/hydro/Minimal/hydro_debug.h
@@ -41,12 +41,12 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "v_full=[%.3g, %.3g, %.3g], a=[%.3g, %.3g, %.3g], \n "
       "m=%.3g, u=%.3g, du/dt=%.3g, P=%.3g, c_s=%.3g, \n "
       "v_sig=%.3g, h=%.3g, dh/dt=%.3g, wcount=%.3g, rho=%.3g, \n "
-      "dh_drho=%.3g, time_bin=%d \n",
+      "dh_drho=%.3g, time_bin=%d wakeup=%d \n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
       p->mass, p->u, p->u_dt, hydro_get_comoving_pressure(p),
       p->force.soundspeed, p->force.v_sig, p->h, p->force.h_dt,
-      p->density.wcount, p->rho, p->density.rho_dh, p->time_bin);
+      p->density.wcount, p->rho, p->density.rho_dh, p->time_bin, p->wakeup);
 }
 
 #endif /* SWIFT_MINIMAL_HYDRO_DEBUG_H */
diff --git a/src/hydro/Minimal/hydro_iact.h b/src/hydro/Minimal/hydro_iact.h
index 42fd93d6062cbfcea5cf5297eeda0bb6525f3cad..7fc7a3c67f6c832d70109319ad964e25df30ff4e 100644
--- a/src/hydro/Minimal/hydro_iact.h
+++ b/src/hydro/Minimal/hydro_iact.h
@@ -53,6 +53,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_density(
 
   float wi, wj, wi_dx, wj_dx;
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Get r. */
   const float r_inv = 1.0f / sqrtf(r2);
   const float r = r2 * r_inv;
@@ -80,6 +87,33 @@ __attribute__((always_inline)) INLINE static void runner_iact_density(
   pj->density.rho_dh -= mi * (hydro_dimension * wj + uj * wj_dx);
   pj->density.wcount += wj;
   pj->density.wcount_dh -= (hydro_dimension * wj + uj * wj_dx);
+
+  /* Compute dv dot r */
+  float dv[3], curlvr[3];
+
+  const float faci = mj * wi_dx * r_inv;
+  const float facj = mi * wj_dx * r_inv;
+
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  pi->density.div_v -= faci * dvdr;
+  pj->density.div_v -= facj * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
+
+  pj->density.rot_v[0] += facj * curlvr[0];
+  pj->density.rot_v[1] += facj * curlvr[1];
+  pj->density.rot_v[2] += facj * curlvr[2];
 }
 
 /**
@@ -100,6 +134,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_density(
 
   float wi, wi_dx;
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Get the masses. */
   const float mj = pj->mass;
 
@@ -115,6 +156,27 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_density(
   pi->density.rho_dh -= mj * (hydro_dimension * wi + ui * wi_dx);
   pi->density.wcount += wi;
   pi->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  /* Compute dv dot r */
+  float dv[3], curlvr[3];
+
+  const float faci = mj * wi_dx * r_inv;
+
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  pi->density.div_v -= faci * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
 }
 
 /**
@@ -133,6 +195,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
     float r2, const float *dx, float hi, float hj, struct part *restrict pi,
     struct part *restrict pj, float a, float H) {
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Cosmological factors entering the EoMs */
   const float fac_mu = pow_three_gamma_minus_five_over_two(a);
   const float a2_Hubble = a * a * H;
@@ -172,20 +241,27 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Add Hubble flow */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Are the particles moving towards each others ? */
-  const float omega_ij = min(dvdr, 0.f);
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Compute sound speeds and signal velocity */
   const float ci = pi->force.soundspeed;
   const float cj = pj->force.soundspeed;
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
+
+  /* Grab balsara switches */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
 
   /* Construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.5f * const_viscosity_alpha * v_sig * mu_ij / rho_ij;
+  const float visc = -0.25f * v_sig * (balsara_i + balsara_j) * mu_ij / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -211,7 +287,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   const float sph_du_term_j = P_over_rho2_j * dvdr * r_inv * wj_dr;
 
   /* Viscosity term */
-  const float visc_du_term = 0.5f * visc_acc_term * dvdr;
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
 
   /* Assemble the energy equation term */
   const float du_dt_i = sph_du_term_i + visc_du_term;
@@ -246,6 +322,13 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
     float r2, const float *dx, float hi, float hj, struct part *restrict pi,
     const struct part *restrict pj, float a, float H) {
 
+#ifdef SWIFT_DEBUG_CHECKS
+  if (pi->time_bin >= time_bin_inhibited)
+    error("Inhibited pi in interaction function!");
+  if (pj->time_bin >= time_bin_inhibited)
+    error("Inhibited pj in interaction function!");
+#endif
+
   /* Cosmological factors entering the EoMs */
   const float fac_mu = pow_three_gamma_minus_five_over_two(a);
   const float a2_Hubble = a * a * H;
@@ -285,20 +368,27 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Add Hubble flow */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Are the particles moving towards each others ? */
-  const float omega_ij = min(dvdr, 0.f);
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Compute sound speeds and signal velocity */
   const float ci = pi->force.soundspeed;
   const float cj = pj->force.soundspeed;
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
+
+  /* Grab balsara switches */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
 
   /* Construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.5f * const_viscosity_alpha * v_sig * mu_ij / rho_ij;
+  const float visc = -0.25f * v_sig * (balsara_i + balsara_j) * mu_ij / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -319,7 +409,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   const float sph_du_term_i = P_over_rho2_i * dvdr * r_inv * wi_dr;
 
   /* Viscosity term */
-  const float visc_du_term = 0.5f * visc_acc_term * dvdr;
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
 
   /* Assemble the energy equation term */
   const float du_dt_i = sph_du_term_i + visc_du_term;
@@ -334,4 +424,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   pi->force.v_sig = max(pi->force.v_sig, v_sig);
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_MINIMAL_HYDRO_IACT_H */
diff --git a/src/hydro/Minimal/hydro_io.h b/src/hydro/Minimal/hydro_io.h
index 879255640fc1a1d6a06a666c80d3860c9c31ab64..1146aa9347d443833cd481103da6f6c57d21fcbf 100644
--- a/src/hydro/Minimal/hydro_io.h
+++ b/src/hydro/Minimal/hydro_io.h
@@ -73,7 +73,7 @@ INLINE static void hydro_read_particles(struct part* parts,
 INLINE static void convert_S(const struct engine* e, const struct part* p,
                              const struct xpart* xp, float* ret) {
 
-  ret[0] = hydro_get_comoving_entropy(p);
+  ret[0] = hydro_get_comoving_entropy(p, xp);
 }
 
 INLINE static void convert_P(const struct engine* e, const struct part* p,
diff --git a/src/hydro/Minimal/hydro_part.h b/src/hydro/Minimal/hydro_part.h
index c33f1b9a214cf9839f1acb965b686d4a4962865c..7697f36ca723875a8b77705eefcf2e2af4605583 100644
--- a/src/hydro/Minimal/hydro_part.h
+++ b/src/hydro/Minimal/hydro_part.h
@@ -34,6 +34,8 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /**
  * @brief Particle fields not needed during the SPH loops over neighbours.
@@ -62,6 +64,12 @@ struct xpart {
   /*! Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /* Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /**
@@ -124,6 +132,12 @@ struct part {
       /*! Derivative of density with respect to h */
       float rho_dh;
 
+      /*! Velocity divergence */
+      float div_v;
+
+      /*! Velocity curl */
+      float rot_v[3];
+
     } density;
 
     /**
@@ -150,6 +164,9 @@ struct part {
       /*! Time derivative of smoothing length  */
       float h_dt;
 
+      /*! Balsara switch */
+      float balsara;
+
     } force;
   };
 
@@ -159,6 +176,9 @@ struct part {
   /*! Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/Planetary/hydro.h b/src/hydro/Planetary/hydro.h
index 106e1a96ae57868c94d1077b74e84909ab0f0830..fbe0b6c8b9a8844cab3e38e485923fc7543ec528 100644
--- a/src/hydro/Planetary/hydro.h
+++ b/src/hydro/Planetary/hydro.h
@@ -26,8 +26,8 @@
  * equations) with multiple materials.
  *
  * The thermal variable is the internal energy (u). Simple constant
- * viscosity term without switches is implemented. No thermal conduction
- * term is implemented.
+ * viscosity term with the Balsara (1995) switch (optional).
+ * No thermal conduction term is implemented.
  *
  * This corresponds to equations (43), (44), (45), (101), (103)  and (104) with
  * \f$\beta=3\f$ and \f$\alpha_u=0\f$ of Price, D., Journal of Computational
@@ -37,6 +37,7 @@
 #include "adiabatic_index.h"
 #include "approx_math.h"
 #include "cosmology.h"
+#include "debug.h"
 #include "dimension.h"
 #include "equation_of_state.h"
 #include "hydro_properties.h"
@@ -45,28 +46,32 @@
 #include "minmax.h"
 
 /*
- * Note: Define PLANETARY_SPH_BALSARA to use the Balsara (1995) switch for
- * the artificial viscosity, instead of the default Monaghan (1992).
- * i.e. compile with:  make CFLAGS=-DPLANETARY_SPH_BALSARA  to use.
+ * Note: Define PLANETARY_SPH_NO_BALSARA to disable the Balsara (1995) switch
+ * for the artificial viscosity and use the vanilla Monaghan (1992) instead.
+ * i.e. compile with:  make CFLAGS=-DPLANETARY_SPH_NO_BALSARA
  */
 
 /**
- * @brief Returns the comoving internal energy of a particle
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not internal energy, this function computes the internal
  * energy from the thermodynamic variable.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return p->u;
+  return xp->u_full;
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not internal energy, this function computes the internal
@@ -74,12 +79,40 @@ hydro_get_comoving_internal_energy(const struct part *restrict p) {
  * physical coordinates.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
   return p->u * cosmo->a_factor_internal_energy;
 }
 
@@ -120,11 +153,12 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
  * the thermodynamic variable.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return gas_entropy_from_internal_energy(p->rho, p->u, p->mat_id);
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full, p->mat_id);
 }
 
 /**
@@ -136,10 +170,40 @@ __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
  * physical coordinates.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full, p->mat_id);
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u, p->mat_id);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -244,12 +308,27 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return p->u_dt;
 }
 
+/**
+ * @brief Returns the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->u_dt * cosmo->a_factor_internal_energy;
+}
+
 /**
  * @brief Returns the time derivative of internal energy of a particle
  *
@@ -258,12 +337,47 @@ __attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
  * @param p The particle of interest.
  * @param du_dt The new time derivative of the internal energy.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
 
   p->u_dt = du_dt;
 }
 
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->u_dt = du_dt / cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  const float comoving_entropy = entropy;
+  xp->u_full =
+      gas_internal_energy_from_entropy(p->rho, comoving_entropy, p->mat_id);
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -393,12 +507,15 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
 
-#ifdef PLANETARY_SPH_BALSARA
   const float fac_mu = cosmo->a_factor_mu;
 
   /* Compute the norm of the curl */
@@ -408,7 +525,6 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
 
   /* Compute the norm of div v */
   const float abs_div_v = fabsf(p->density.div_v);
-#endif  // PLANETARY_SPH_BALSARA
 
   /* Compute the pressure */
   const float pressure =
@@ -420,30 +536,28 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
 
   /* Compute the "grad h" term */
   const float rho_inv = 1.f / p->rho;
-  float grad_h_term;
-  const float grad_h_term_inv =
-      1.f + hydro_dimension_inv * p->h * p->density.rho_dh * rho_inv;
-  /* Avoid 1/0 from only having one neighbour right at the edge of the kernel */
-  if (grad_h_term_inv != 0.f) {
-    grad_h_term = 1.f / grad_h_term_inv;
-  } else {
-    grad_h_term = 0.f;
+  float rho_dh = p->density.rho_dh;
+  /* Ignore changing-kernel effects when h is h_max */
+  if (p->h == hydro_props->h_max) {
+    rho_dh = 0.f;
   }
+  const float grad_h_term =
+      1.f / (1.f + hydro_dimension_inv * p->h * rho_dh * rho_inv);
 
-#ifdef PLANETARY_SPH_BALSARA
   /* Compute the Balsara switch */
+#ifdef PLANETARY_SPH_NO_BALSARA
+  const float balsara = hydro_props->viscosity.alpha;
+#else
   const float balsara =
-      abs_div_v / (abs_div_v + curl_v + 0.0001f * fac_mu * soundspeed / p->h);
-#endif  // PLANETARY_SPH_BALSARA
+      hydro_props->viscosity.alpha * abs_div_v /
+      (abs_div_v + curl_v + 0.0001f * fac_mu * soundspeed / p->h);
+#endif
 
   /* Update variables. */
   p->force.f = grad_h_term;
   p->force.pressure = pressure;
   p->force.soundspeed = soundspeed;
-
-#ifdef PLANETARY_SPH_BALSARA
   p->force.balsara = balsara;
-#endif  // PLANETARY_SPH_BALSARA
 }
 
 /**
@@ -611,7 +725,7 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
 
   /* Compute the pressure */
   const float pressure =
@@ -639,6 +753,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
   xp->v_full[2] = p->v[2];
@@ -668,4 +783,17 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->u = u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {
+
+  printParticle_single(p, xp);
+}
+
 #endif /* SWIFT_PLANETARY_HYDRO_H */
diff --git a/src/hydro/Planetary/hydro_debug.h b/src/hydro/Planetary/hydro_debug.h
index 74261f3b49e2881af1c403013005560efa53a7f1..306f7526404599a051f83dc1b61886ed2aa5b69e 100644
--- a/src/hydro/Planetary/hydro_debug.h
+++ b/src/hydro/Planetary/hydro_debug.h
@@ -42,12 +42,13 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "v_full=[%.3g, %.3g, %.3g], a=[%.3g, %.3g, %.3g], \n "
       "m=%.3g, u=%.3g, du/dt=%.3g, P=%.3g, c_s=%.3g, \n "
       "v_sig=%.3g, h=%.3g, dh/dt=%.3g, wcount=%.3g, rho=%.3g, \n "
-      "dh_drho=%.3g, time_bin=%d, mat_id=%d \n",
+      "dh_drho=%.3g, time_bin=%d, wakeup=%d, mat_id=%d \n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
       p->mass, p->u, p->u_dt, hydro_get_comoving_pressure(p),
       p->force.soundspeed, p->force.v_sig, p->h, p->force.h_dt,
-      p->density.wcount, p->rho, p->density.rho_dh, p->time_bin, p->mat_id);
+      p->density.wcount, p->rho, p->density.rho_dh, p->time_bin, p->wakeup,
+      p->mat_id);
 }
 
 #endif /* SWIFT_PLANETARY_HYDRO_DEBUG_H */
diff --git a/src/hydro/Planetary/hydro_iact.h b/src/hydro/Planetary/hydro_iact.h
index bf96034696806e3adff1d8ba7f385af65461b9ea..afebb6a406bd310f38d51dcb32fc25da6b2674b5 100644
--- a/src/hydro/Planetary/hydro_iact.h
+++ b/src/hydro/Planetary/hydro_iact.h
@@ -25,8 +25,8 @@
  * @brief Minimal conservative implementation of SPH (Neighbour loop equations)
  *
  * The thermal variable is the internal energy (u). Simple constant
- * viscosity term without switches is implemented. No thermal conduction
- * term is implemented.
+ * viscosity term with the Balsara (1995) switch (optional).
+ * No thermal conduction term is implemented.
  *
  * This corresponds to equations (43), (44), (45), (101), (103)  and (104) with
  * \f$\beta=3\f$ and \f$\alpha_u=0\f$ of Price, D., Journal of Computational
@@ -176,11 +176,9 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
                      (pi->v[1] - pj->v[1]) * dx[1] +
                      (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
 
-#ifdef PLANETARY_SPH_BALSARA
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
   const float balsara_j = pj->force.balsara;
-#endif  // PLANETARY_SPH_BALSARA
 
   /* Are the particles moving towards each other? */
   const float omega_ij = min(dvdr, 0.f);
@@ -189,16 +187,11 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   /* Compute sound speeds and signal velocity */
   const float ci = pi->force.soundspeed;
   const float cj = pj->force.soundspeed;
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Now construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-#ifdef PLANETARY_SPH_BALSARA
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
-#else
-  const float visc = -0.5f * const_viscosity_alpha * v_sig * mu_ij / rho_ij;
-#endif  // PLANETARY_SPH_BALSARA
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -300,11 +293,9 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
                      (pi->v[1] - pj->v[1]) * dx[1] +
                      (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
 
-#ifdef PLANETARY_SPH_BALSARA
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
   const float balsara_j = pj->force.balsara;
-#endif  // PLANETARY_SPH_BALSARA
 
   /* Are the particles moving towards each other? */
   const float omega_ij = min(dvdr, 0.f);
@@ -315,16 +306,11 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   const float cj = pj->force.soundspeed;
 
   /* Signal velocity */
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-#ifdef PLANETARY_SPH_BALSARA
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
-#else
-  const float visc = -0.5f * const_viscosity_alpha * v_sig * mu_ij / rho_ij;
-#endif  // PLANETARY_SPH_BALSARA
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -360,4 +346,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   pi->force.v_sig = max(pi->force.v_sig, v_sig);
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_PLANETARY_HYDRO_IACT_H */
diff --git a/src/hydro/Planetary/hydro_io.h b/src/hydro/Planetary/hydro_io.h
index afb37d884494fd02e30c143194804a2b49a77be0..1b84f8d6db295694846ffd26a422ce158aad0c60 100644
--- a/src/hydro/Planetary/hydro_io.h
+++ b/src/hydro/Planetary/hydro_io.h
@@ -25,8 +25,8 @@
  * @brief Minimal conservative implementation of SPH (i/o routines)
  *
  * The thermal variable is the internal energy (u). Simple constant
- * viscosity term without switches is implemented. No thermal conduction
- * term is implemented.
+ * viscosity term with the Balsara (1995) switch (optional).
+ * No thermal conduction term is implemented.
  *
  * This corresponds to equations (43), (44), (45), (101), (103)  and (104) with
  * \f$\beta=3\f$ and \f$\alpha_u=0\f$ of
@@ -76,7 +76,7 @@ INLINE static void hydro_read_particles(struct part* parts,
 INLINE static void convert_S(const struct engine* e, const struct part* p,
                              const struct xpart* xp, float* ret) {
 
-  ret[0] = hydro_get_comoving_entropy(p);
+  ret[0] = hydro_get_comoving_entropy(p, xp);
 }
 
 INLINE static void convert_P(const struct engine* e, const struct part* p,
@@ -197,14 +197,14 @@ INLINE static void hydro_write_flavour(hid_t h_grpsph) {
   /* Viscosity and thermal conduction */
   /* Nothing in this minimal model... */
   io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment");
-#ifdef PLANETARY_SPH_BALSARA
+#ifdef PLANETARY_SPH_NO_BALSARA
+  io_write_attribute_s(h_grpsph, "Viscosity Model",
+                       "Minimal treatment as in Monaghan (1992)");
+#else
   io_write_attribute_s(
       h_grpsph, "Viscosity Model",
       "as in Springel (2005), i.e. Monaghan (1992) with Balsara (1995) switch");
-#else
-  io_write_attribute_s(h_grpsph, "Viscosity Model",
-                       "Minimal treatment as in Monaghan (1992)");
-#endif  // PLANETARY_SPH_BALSARA
+#endif
 
   /* Time integration properties */
   io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt",
diff --git a/src/hydro/Planetary/hydro_part.h b/src/hydro/Planetary/hydro_part.h
index 7d1fc8f6729992bfdf2eeaba6e33cc9a7b071655..b2725ca1fceddb196a6b2be42b768eb3f88f1101 100644
--- a/src/hydro/Planetary/hydro_part.h
+++ b/src/hydro/Planetary/hydro_part.h
@@ -25,8 +25,8 @@
  * @brief Minimal conservative implementation of SPH (Particle definition)
  *
  * The thermal variable is the internal energy (u). Simple constant
- * viscosity term without switches is implemented. No thermal conduction
- * term is implemented.
+ * viscosity term with the Balsara (1995) switch (optional).
+ * No thermal conduction term is implemented.
  *
  * This corresponds to equations (43), (44), (45), (101), (103)  and (104) with
  * \f$\beta=3\f$ and \f$\alpha_u=0\f$ of Price, D., Journal of Computational
@@ -36,6 +36,8 @@
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
 #include "equation_of_state.h"  // For enum material_id
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /**
  * @brief Particle fields not needed during the SPH loops over neighbours.
@@ -64,6 +66,12 @@ struct xpart {
   /*! Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /*! Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /**
@@ -126,13 +134,11 @@ struct part {
       /*! Derivative of density with respect to h */
       float rho_dh;
 
-#ifdef PLANETARY_SPH_BALSARA
       /*! Velocity divergence. */
       float div_v;
 
       /*! Velocity curl. */
       float rot_v[3];
-#endif  // PLANETARY_SPH_BALSARA
 
     } density;
 
@@ -160,10 +166,8 @@ struct part {
       /*! Time derivative of smoothing length  */
       float h_dt;
 
-#ifdef PLANETARY_SPH_BALSARA
       /*! Balsara switch */
       float balsara;
-#endif  // PLANETARY_SPH_BALSARA
 
     } force;
   };
@@ -177,6 +181,9 @@ struct part {
   /*! Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/PressureEnergy/hydro.h b/src/hydro/PressureEnergy/hydro.h
index 5082db6c792701511972a52fd2fb00a6a45f7271..4af00f7a657d61871dc0a82affb04d411e13e047 100644
--- a/src/hydro/PressureEnergy/hydro.h
+++ b/src/hydro/PressureEnergy/hydro.h
@@ -49,22 +49,26 @@
 #include <float.h>
 
 /**
- * @brief Returns the comoving internal energy of a particle
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not internal energy, this function computes the internal
  * energy from the thermodynamic variable.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return p->u;
+  return xp->u_full;
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not internal energy, this function computes the internal
@@ -72,12 +76,40 @@ hydro_get_comoving_internal_energy(const struct part *restrict p) {
  * physical coordinates.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
   return p->u * cosmo->a_factor_internal_energy;
 }
 
@@ -110,33 +142,66 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
 }
 
 /**
- * @brief Returns the comoving entropy of a particle
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not entropy, this function computes the entropy from
  * the thermodynamic variable.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return gas_entropy_from_internal_energy(p->rho, p->u);
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
 }
 
 /**
- * @brief Returns the physical entropy of a particle
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
  *
  * For implementations where the main thermodynamic variable
  * is not entropy, this function computes the entropy from
  * the thermodynamic variable and converts it to
  * physical coordinates.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -245,12 +310,27 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return p->u_dt;
 }
 
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->u_dt * cosmo->a_factor_internal_energy;
+}
+
 /**
  * @brief Sets the time derivative of internal energy of a particle
  *
@@ -259,12 +339,46 @@ __attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
  * @param p The particle of interest.
  * @param du_dt The new time derivative of the internal energy.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
 
   p->u_dt = du_dt;
 }
 
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->u_dt = du_dt / cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  const float comoving_entropy = entropy;
+  xp->u_full = gas_internal_energy_from_entropy(p->rho, comoving_entropy);
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -375,8 +489,10 @@ __attribute__((always_inline)) INLINE static void hydro_end_density(
   p->density.rot_v[1] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
   p->density.rot_v[2] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
 
-  /* Finish calculation of the velocity divergence */
-  p->density.div_v *= h_inv_dim_plus_one * rho_inv * a_inv2;
+  /* Finish calculation of the velocity divergence, including hubble flow term
+   */
+  p->density.div_v = p->density.div_v * h_inv_dim_plus_one * rho_inv * a_inv2 +
+                     cosmo->H * hydro_dimension;
 }
 
 /**
@@ -427,12 +543,16 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
 
-  const float fac_mu = cosmo->a_factor_mu;
+  const float fac_B = cosmo->a_factor_Balsara_eps;
 
   /* Compute the norm of the curl */
   const float curl_v = sqrtf(p->density.rot_v[0] * p->density.rot_v[0] +
@@ -447,7 +567,8 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
 
   /* Compute the Balsara switch */
   const float balsara =
-      abs_div_v / (abs_div_v + curl_v + 0.0001f * soundspeed * fac_mu / p->h);
+      hydro_props->viscosity.alpha * abs_div_v /
+      (abs_div_v + curl_v + 0.0001f * soundspeed * fac_B / p->h);
 
   /* Compute the "grad h" term */
   const float common_factor = p->h / (hydro_dimension * p->density.wcount);
@@ -578,12 +699,16 @@ __attribute__((always_inline)) INLINE static void hydro_end_force(
  * @param p The particle to act upon.
  * @param xp The particle extended data to act upon.
  * @param dt_therm The time-step for this kick (for thermodynamic quantities).
+ * @param dt_grav The time-step for this kick (for gravity quantities).
+ * @param dt_hydro The time-step for this kick (for hydro quantities).
+ * @param dt_kick_corr The time-step for this kick (for gravity corrections).
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme
  */
 __attribute__((always_inline)) INLINE static void hydro_kick_extra(
     struct part *restrict p, struct xpart *restrict xp, float dt_therm,
     float dt_grav, float dt_hydro, float dt_kick_corr,
-    const struct cosmology *cosmo,
-    const struct hydro_props *restrict hydro_properties) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
 
   /* Do not decrease the energy by more than a factor of 2*/
   if (dt_therm > 0. && p->u_dt * dt_therm < -0.5f * xp->u_full) {
@@ -591,6 +716,14 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
   }
   xp->u_full += p->u_dt * dt_therm;
 
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u_dt = 0.f;
+  }
+
   /* Compute the sound speed */
   const float soundspeed = hydro_get_comoving_soundspeed(p);
 
@@ -607,10 +740,31 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  *
  * @param p The particle to act upon
  * @param xp The extended particle to act upon
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {}
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Convert the physical internal energy to the comoving one. */
+  /* u' = a^(3(g-1)) u */
+  const float factor = 1.f / cosmo->a_factor_internal_energy;
+  p->u *= factor;
+  xp->u_full = p->u;
+
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u = min_energy;
+    p->u_dt = 0.f;
+  }
+
+  /* Note that unlike Minimal the pressure and sound speed cannot be calculated
+   * here because they are smoothed properties in this scheme. */
+}
 
 /**
  * @brief Initialises the particles for the first time
@@ -626,6 +780,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   xp->v_full[0] = p->v[0];
   xp->v_full[1] = p->v[1];
   xp->v_full[2] = p->v[2];
@@ -655,4 +810,14 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->u = u_init;
 }
 
-#endif /* SWIFT_MINIMAL_HYDRO_H */
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
+#endif /* SWIFT_PRESSURE_ENERGY_HYDRO_H */
diff --git a/src/hydro/PressureEnergy/hydro_debug.h b/src/hydro/PressureEnergy/hydro_debug.h
index 6324167f12726e155eeaa3359be9741aca3a1e42..7ffc370ed4d6abd273fc3d8d5b887f5ccf8e001c 100644
--- a/src/hydro/PressureEnergy/hydro_debug.h
+++ b/src/hydro/PressureEnergy/hydro_debug.h
@@ -32,12 +32,12 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "u=%.3e, du/dt=%.3e v_sig=%.3e, P=%.3e\n"
       "h=%.3e, dh/dt=%.3e wcount=%d, m=%.3e, dh_drho=%.3e, rho=%.3e, \n"
       "p_dh=%.3e, p_bar=%.3e \n"
-      "time_bin=%d\n",
+      "time_bin=%d, wakeup=%d\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
       p->u, p->u_dt, p->force.v_sig, hydro_get_comoving_pressure(p), p->h,
       p->force.h_dt, (int)p->density.wcount, p->mass, p->density.rho_dh, p->rho,
-      p->density.pressure_bar_dh, p->pressure_bar, p->time_bin);
+      p->density.pressure_bar_dh, p->pressure_bar, p->time_bin, p->wakeup);
 }
 
 #endif /* SWIFT_MINIMAL_HYDRO_DEBUG_H */
diff --git a/src/hydro/PressureEnergy/hydro_iact.h b/src/hydro/PressureEnergy/hydro_iact.h
index 65c46a55554d4a8f09b32bb6eb1deb1fdcfc932a..ae154ea549a52cb24ed7c69453533b7d59b39a85 100644
--- a/src/hydro/PressureEnergy/hydro_iact.h
+++ b/src/hydro/PressureEnergy/hydro_iact.h
@@ -17,8 +17,8 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_MINIMAL_HYDRO_IACT_H
-#define SWIFT_MINIMAL_HYDRO_IACT_H
+#ifndef SWIFT_PRESSURE_ENERGY_HYDRO_IACT_H
+#define SWIFT_PRESSURE_ENERGY_HYDRO_IACT_H
 
 /**
  * @file PressureEnergy/hydro_iact.h
@@ -232,16 +232,19 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; not used for du/dt */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Are the part*icles moving towards each others ? */
-  const float omega_ij = min(dvdr, 0.f);
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Compute sound speeds and signal velocity */
   const float ci = pi->force.soundspeed;
   const float cj = pj->force.soundspeed;
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
@@ -249,8 +252,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
 
   /* Construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -282,7 +284,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
                               wj_dr * dvdr * r_inv;
 
   /* Viscosity term */
-  const float visc_du_term = 0.5f * visc_acc_term * dvdr;
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
 
   /* Assemble the energy equation term */
   const float du_dt_i = sph_du_term_i + visc_du_term;
@@ -357,16 +359,19 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   /* Compute dv dot r. */
   const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
                      (pi->v[1] - pj->v[1]) * dx[1] +
-                     (pi->v[2] - pj->v[2]) * dx[2] + a2_Hubble * r2;
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; not used for du/dt */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
 
   /* Are the part*icles moving towards each others ? */
-  const float omega_ij = min(dvdr, 0.f);
+  const float omega_ij = min(dvdr_Hubble, 0.f);
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Compute sound speeds and signal velocity */
   const float ci = pi->force.soundspeed;
   const float cj = pj->force.soundspeed;
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Balsara term */
   const float balsara_i = pi->force.balsara;
@@ -374,8 +379,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
 
   /* Construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Convolve with the kernel */
   const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
@@ -400,7 +404,7 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
                               wi_dr * dvdr * r_inv;
 
   /* Viscosity term */
-  const float visc_du_term = 0.5f * visc_acc_term * dvdr;
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
 
   /* Assemble the energy equation term */
   const float du_dt_i = sph_du_term_i + visc_du_term;
@@ -414,5 +418,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   /* Update the signal velocity. */
   pi->force.v_sig = max(pi->force.v_sig, v_sig);
 }
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
 
-#endif /* SWIFT_MINIMAL_HYDRO_IACT_H */
+#endif /* SWIFT_PRESSURE_ENERGY_HYDRO_IACT_H */
diff --git a/src/hydro/PressureEnergy/hydro_io.h b/src/hydro/PressureEnergy/hydro_io.h
index 78967faec218f0efffbb624c4e8d25af214aad94..701c12283bf77acef4af77598f57705a2b364fa1 100644
--- a/src/hydro/PressureEnergy/hydro_io.h
+++ b/src/hydro/PressureEnergy/hydro_io.h
@@ -17,8 +17,8 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_PRESSURE_ENERGY_HYDRO_IACT_H
-#define SWIFT_PRESSURE_ENERGY_HYDRO_IACT_H
+#ifndef SWIFT_PRESSURE_ENERGY_HYDRO_IO_H
+#define SWIFT_PRESSURE_ENERGY_HYDRO_IO_H
 /**
  * @file PressureEnergy/hydro_io.h
  * @brief P-U implementation of SPH (i/o routines)
@@ -68,16 +68,10 @@ INLINE static void hydro_read_particles(struct part* parts,
                                 UNIT_CONV_DENSITY, parts, rho);
 }
 
-INLINE static void convert_u(const struct engine* e, const struct part* p,
-                             const struct xpart* xp, float* ret) {
-
-  ret[0] = hydro_get_comoving_internal_energy(p);
-}
-
 INLINE static void convert_S(const struct engine* e, const struct part* p,
                              const struct xpart* xp, float* ret) {
 
-  ret[0] = hydro_get_comoving_entropy(p);
+  ret[0] = hydro_get_comoving_entropy(p, xp);
 }
 
 INLINE static void convert_P(const struct engine* e, const struct part* p,
@@ -136,6 +130,15 @@ INLINE static void convert_part_vel(const struct engine* e,
   ret[2] *= cosmo->a_inv;
 }
 
+INLINE static void convert_part_potential(const struct engine* e,
+                                          const struct part* p,
+                                          const struct xpart* xp, float* ret) {
+  if (p->gpart != NULL)
+    ret[0] = gravity_get_comoving_potential(p->gpart);
+  else
+    ret[0] = 0.f;
+}
+
 /**
  * @brief Specifies which particle fields to write to a dataset
  *
@@ -148,7 +151,7 @@ INLINE static void hydro_write_particles(const struct part* parts,
                                          struct io_props* list,
                                          int* num_fields) {
 
-  *num_fields = 9;
+  *num_fields = 10;
 
   /* List what we want to write */
   list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3,
@@ -160,9 +163,8 @@ INLINE static void hydro_write_particles(const struct part* parts,
       io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
   list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
                                  parts, h);
-  list[4] = io_make_output_field_convert_part("InternalEnergy", FLOAT, 1,
-                                              UNIT_CONV_ENERGY_PER_UNIT_MASS,
-                                              parts, xparts, convert_u);
+  list[4] = io_make_output_field("InternalEnergy", FLOAT, 1,
+                                 UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
   list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
                                  UNIT_CONV_NO_UNITS, parts, id);
   list[6] =
@@ -172,6 +174,9 @@ INLINE static void hydro_write_particles(const struct part* parts,
   list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1,
                                               UNIT_CONV_ENTROPY_PER_UNIT_MASS,
                                               parts, xparts, convert_S);
+  list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1,
+                                              UNIT_CONV_POTENTIAL, parts,
+                                              xparts, convert_part_potential);
 }
 
 /**
diff --git a/src/hydro/PressureEnergy/hydro_part.h b/src/hydro/PressureEnergy/hydro_part.h
index bc7d14b612556dc722ecca67dd6ce823192e00f0..20c326da443e4acd1c3bdc0ebd01cce81bb6bad7 100644
--- a/src/hydro/PressureEnergy/hydro_part.h
+++ b/src/hydro/PressureEnergy/hydro_part.h
@@ -33,6 +33,8 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /**
  * @brief Particle fields not needed during the SPH loops over neighbours.
@@ -61,6 +63,12 @@ struct xpart {
   /*! Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /*! Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /**
@@ -168,6 +176,9 @@ struct part {
   /*! Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro.h
new file mode 100644
index 0000000000000000000000000000000000000000..b50ca4e2543af94573ff34954c26a23200b78a1d
--- /dev/null
+++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro.h
@@ -0,0 +1,833 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_H
+#define SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_H
+
+/**
+ * @file PressureEnergy/hydro.h
+ * @brief P-U conservative implementation of SPH (Non-neighbour loop
+ * equations)
+ *
+ * The thermal variable is the internal energy (u). A simple variable
+ * viscosity term (Morris & Monaghan 1997) with a Balsara switch is
+ * implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * This implementation corresponds to the one presented in the SWIFT
+ * documentation and in Hopkins, "A general class of Lagrangian smoothed
+ * particle hydrodynamics methods and implications for fluid mixing problems",
+ * MNRAS, 2013.
+ */
+
+#include "adiabatic_index.h"
+#include "approx_math.h"
+#include "cosmology.h"
+#include "dimension.h"
+#include "equation_of_state.h"
+#include "hydro_properties.h"
+#include "hydro_space.h"
+#include "kernel_hydro.h"
+#include "minmax.h"
+
+#include <float.h>
+
+/**
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not internal energy, this function computes the internal
+ * energy from the thermodynamic variable.
+ *
+ * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
+
+  return xp->u_full;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
+ *
+ * For implementations where the main thermodynamic variable
+ * is not internal energy, this function computes the internal
+ * energy from the thermodynamic variable and converts it to
+ * physical coordinates.
+ *
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
+                                   const struct cosmology *cosmo) {
+
+  return xp->u_full * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving pressure of a particle
+ *
+ * Computes the pressure based on the particle's properties.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_pressure(
+    const struct part *restrict p) {
+
+  return p->pressure_bar;
+}
+
+/**
+ * @brief Returns the physical pressure of a particle
+ *
+ * Computes the pressure based on the particle's properties and
+ * convert it to physical coordinates.
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
+    const struct part *restrict p, const struct cosmology *cosmo) {
+
+  return cosmo->a_factor_pressure * p->pressure_bar;
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle
+ *
+ * For implementations where the main thermodynamic variable
+ * is not entropy, this function computes the entropy from
+ * the thermodynamic variable.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
+    const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle
+ *
+ * For implementations where the main thermodynamic variable
+ * is not entropy, this function computes the entropy from
+ * the thermodynamic variable and converts it to
+ * physical coordinates.
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, xp->u_full);
+}
+
+/**
+ * @brief Returns the comoving sound speed of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_soundspeed(const struct part *restrict p) {
+
+  /* Compute the sound speed -- see theory section for justification */
+  /* IDEAL GAS ONLY -- P-U does not work with generic EoS. */
+  const float square_rooted = sqrtf(hydro_gamma * p->pressure_bar / p->rho);
+
+  return square_rooted;
+}
+
+/**
+ * @brief Returns the physical sound speed of a particle
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_soundspeed(const struct part *restrict p,
+                              const struct cosmology *cosmo) {
+
+  return cosmo->a_factor_sound_speed * p->force.soundspeed;
+}
+
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return p->u;
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
+  return p->u * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return gas_entropy_from_internal_energy(p->rho, p->u);
+}
+
+/**
+ * @brief Returns the comoving density of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_comoving_density(
+    const struct part *restrict p) {
+
+  return p->rho;
+}
+
+/**
+ * @brief Returns the comoving density of a particle.
+ *
+ * @param p The particle of interest
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_physical_density(
+    const struct part *restrict p, const struct cosmology *cosmo) {
+
+  return cosmo->a3_inv * p->rho;
+}
+
+/**
+ * @brief Returns the mass of a particle
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float hydro_get_mass(
+    const struct part *restrict p) {
+
+  return p->mass;
+}
+
+/**
+ * @brief Sets the mass of a particle
+ *
+ * @param p The particle of interest
+ * @param m The mass to set.
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_mass(
+    struct part *restrict p, float m) {
+
+  p->mass = m;
+}
+
+/**
+ * @brief Returns the velocities drifted to the current time of a particle.
+ *
+ * @param p The particle of interest
+ * @param xp The extended data of the particle.
+ * @param dt_kick_hydro The time (for hydro accelerations) since the last kick.
+ * @param dt_kick_grav The time (for gravity accelerations) since the last kick.
+ * @param v (return) The velocities at the current time.
+ */
+__attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
+    const struct part *restrict p, const struct xpart *xp, float dt_kick_hydro,
+    float dt_kick_grav, float v[3]) {
+
+  v[0] = xp->v_full[0] + p->a_hydro[0] * dt_kick_hydro +
+         xp->a_grav[0] * dt_kick_grav;
+  v[1] = xp->v_full[1] + p->a_hydro[1] * dt_kick_hydro +
+         xp->a_grav[1] * dt_kick_grav;
+  v[2] = xp->v_full[2] + p->a_hydro[2] * dt_kick_hydro +
+         xp->a_grav[2] * dt_kick_grav;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
+
+  return p->u_dt;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest
+ * @param cosmo Cosmology data structure
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return p->u_dt * cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
+
+  p->u_dt = du_dt;
+}
+
+/**
+ * @brief Returns the time derivative of internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The new time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *cosmo,
+                                      float du_dt) {
+
+  p->u_dt = du_dt / cosmo->a_factor_internal_energy;
+}
+
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  const float comoving_entropy = entropy;
+  xp->u_full = gas_internal_energy_from_entropy(p->rho, comoving_entropy);
+}
+
+/**
+ * @brief Computes the hydro time-step of a given particle
+ *
+ * This function returns the time-step of a particle given its hydro-dynamical
+ * state. A typical time-step calculation would be the use of the CFL condition.
+ *
+ * @param p Pointer to the particle data
+ * @param xp Pointer to the extended particle data
+ * @param hydro_properties The SPH parameters
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float hydro_compute_timestep(
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct hydro_props *restrict hydro_properties,
+    const struct cosmology *restrict cosmo) {
+
+  const float CFL_condition = hydro_properties->CFL_condition;
+
+  /* CFL condition */
+  const float dt_cfl = 2.f * kernel_gamma * CFL_condition * cosmo->a * p->h /
+                       (cosmo->a_factor_sound_speed * p->force.v_sig);
+
+  const float dt_u_change =
+      (p->u_dt != 0.0f) ? fabsf(const_max_u_change * p->u / p->u_dt) : FLT_MAX;
+
+  return fminf(dt_cfl, dt_u_change);
+}
+
+/**
+ * @brief Does some extra hydro operations once the actual physical time step
+ * for the particle is known.
+ *
+ * @param p The particle to act upon.
+ * @param dt Physical time step of the particle during the next step.
+ */
+__attribute__((always_inline)) INLINE static void hydro_timestep_extra(
+    struct part *p, float dt) {}
+
+/**
+ * @brief Prepares a particle for the density calculation.
+ *
+ * Zeroes all the relevant arrays in preparation for the sums taking place in
+ * the various density loop over neighbours. Typically, all fields of the
+ * density sub-structure of a particle get zeroed in here.
+ *
+ * @param p The particle to act upon
+ * @param hs #hydro_space containing hydro specific space information.
+ */
+__attribute__((always_inline)) INLINE static void hydro_init_part(
+    struct part *restrict p, const struct hydro_space *hs) {
+
+  p->density.wcount = 0.f;
+  p->density.wcount_dh = 0.f;
+  p->rho = 0.f;
+  p->density.rho_dh = 0.f;
+  p->pressure_bar = 0.f;
+  p->density.pressure_bar_dh = 0.f;
+
+  p->density.div_v = 0.f;
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
+}
+
+/**
+ * @brief Finishes the density calculation.
+ *
+ * Multiplies the density and number of neighbours by the appropriate constants
+ * and add the self-contribution term.
+ * Additional quantities such as velocity gradients will also get the final
+ * terms added to them here.
+ *
+ * Also adds/multiplies the cosmological terms if need be.
+ *
+ * @param p The particle to act upon
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_end_density(
+    struct part *restrict p, const struct cosmology *cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv);       /* 1/h^d */
+  const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */
+
+  /* Final operation on the density (add self-contribution). */
+  p->rho += p->mass * kernel_root;
+  p->density.rho_dh -= hydro_dimension * p->mass * kernel_root;
+  p->pressure_bar += p->mass * p->u * kernel_root;
+  p->density.pressure_bar_dh -= hydro_dimension * p->mass * p->u * kernel_root;
+  p->density.wcount += kernel_root;
+  p->density.wcount_dh -= hydro_dimension * kernel_root;
+
+  /* Finish the calculation by inserting the missing h-factors */
+  p->rho *= h_inv_dim;
+  p->density.rho_dh *= h_inv_dim_plus_one;
+  p->pressure_bar *= (h_inv_dim * hydro_gamma_minus_one);
+  p->density.pressure_bar_dh *= (h_inv_dim_plus_one * hydro_gamma_minus_one);
+  p->density.wcount *= h_inv_dim;
+  p->density.wcount_dh *= h_inv_dim_plus_one;
+
+  const float rho_inv = 1.f / p->rho;
+  const float a_inv2 = cosmo->a2_inv;
+
+  /* Finish calculation of the velocity curl components */
+  p->density.rot_v[0] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[1] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+  p->density.rot_v[2] *= h_inv_dim_plus_one * a_inv2 * rho_inv;
+
+  /* Finish calculation of the velocity divergence */
+  p->density.div_v *=
+      h_inv_dim_plus_one * rho_inv * a_inv2 + cosmo->H * hydro_dimension;
+}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
+ *
+ * In the desperate case where a particle has no neighbours (likely because
+ * of the h_max ceiling), set the particle fields to something sensible to avoid
+ * NaNs in the next calculations.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = p->h;
+  const float h_inv = 1.0f / h;                 /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv); /* 1/h^d */
+
+  /* Re-set problematic values */
+  p->rho = p->mass * kernel_root * h_inv_dim;
+  p->pressure_bar =
+      p->mass * p->u * hydro_gamma_minus_one * kernel_root * h_inv_dim;
+  p->density.wcount = kernel_root * h_inv_dim;
+  p->density.rho_dh = 0.f;
+  p->density.wcount_dh = 0.f;
+  p->density.pressure_bar_dh = 0.f;
+
+  p->density.div_v = 0.f;
+  p->density.rot_v[0] = 0.f;
+  p->density.rot_v[1] = 0.f;
+  p->density.rot_v[2] = 0.f;
+}
+
+/**
+ * @brief Prepare a particle for the force calculation.
+ *
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
+ */
+__attribute__((always_inline)) INLINE static void hydro_prepare_force(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
+
+  const float fac_B = cosmo->a_factor_Balsara_eps;
+
+  const float h_inv = 1.f / p->h;
+
+  /* Compute the norm of the curl */
+  const float curl_v = sqrtf(p->density.rot_v[0] * p->density.rot_v[0] +
+                             p->density.rot_v[1] * p->density.rot_v[1] +
+                             p->density.rot_v[2] * p->density.rot_v[2]);
+
+  /* Compute the norm of div v */
+  const float abs_div_v = fabsf(p->density.div_v);
+
+  /* Compute the sound speed -- see theory section for justification */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  /* Compute the Balsara switch */
+  const float balsara =
+      abs_div_v / (abs_div_v + curl_v + 0.0001f * soundspeed * fac_B * h_inv);
+
+  /* Compute the "grad h" term */
+  const float common_factor = p->h / (hydro_dimension * p->density.wcount);
+  const float grad_h_term = (p->density.pressure_bar_dh * common_factor *
+                             hydro_one_over_gamma_minus_one) /
+                            (1.f + common_factor * p->density.wcount_dh);
+
+  /* Artificial viscosity updates */
+
+  const float inverse_tau = hydro_props->viscosity.length * soundspeed * h_inv;
+  const float source_term = -1.f * min(p->density.div_v, 0.f);
+
+  /* Compute da/dt */
+  const float alpha_time_differential =
+      source_term + (hydro_props->viscosity.alpha_min - p->alpha) * inverse_tau;
+
+  /* Update variables. */
+  p->alpha += alpha_time_differential * dt_alpha;
+  p->force.f = grad_h_term;
+  p->force.soundspeed = soundspeed;
+  p->force.balsara = balsara;
+}
+
+/**
+ * @brief Reset acceleration fields of a particle
+ *
+ * Resets all hydro acceleration and time derivative fields in preparation
+ * for the sums taking  place in the various force tasks.
+ *
+ * @param p The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void hydro_reset_acceleration(
+    struct part *restrict p) {
+
+  /* Reset the acceleration. */
+  p->a_hydro[0] = 0.0f;
+  p->a_hydro[1] = 0.0f;
+  p->a_hydro[2] = 0.0f;
+
+  /* Reset the time derivatives. */
+  p->u_dt = 0.0f;
+  p->force.h_dt = 0.0f;
+  p->force.v_sig = p->force.soundspeed;
+}
+
+/**
+ * @brief Sets the values to be predicted in the drifts to their values at a
+ * kick time
+ *
+ * @param p The particle.
+ * @param xp The extended data of this particle.
+ */
+__attribute__((always_inline)) INLINE static void hydro_reset_predicted_values(
+    struct part *restrict p, const struct xpart *restrict xp) {
+
+  /* Re-set the predicted velocities */
+  p->v[0] = xp->v_full[0];
+  p->v[1] = xp->v_full[1];
+  p->v[2] = xp->v_full[2];
+
+  /* Re-set the entropy */
+  p->u = xp->u_full;
+}
+
+/**
+ * @brief Predict additional particle fields forward in time when drifting
+ *
+ * Additional hydrodynamic quantities are drifted forward in time here. These
+ * include thermal quantities (thermal energy or total energy or entropy, ...).
+ *
+ * Note the different time-step sizes used for the different quantities as they
+ * include cosmological factors.
+ *
+ * @param p The particle.
+ * @param xp The extended data of the particle.
+ * @param dt_drift The drift time-step for positions.
+ * @param dt_therm The drift time-step for thermal quantities.
+ */
+__attribute__((always_inline)) INLINE static void hydro_predict_extra(
+    struct part *restrict p, const struct xpart *restrict xp, float dt_drift,
+    float dt_therm) {
+
+  const float h_inv = 1.f / p->h;
+
+  /* Predict smoothing length */
+  const float w1 = p->force.h_dt * h_inv * dt_drift;
+  if (fabsf(w1) < 0.2f)
+    p->h *= approx_expf(w1); /* 4th order expansion of exp(w) */
+  else
+    p->h *= expf(w1);
+
+  /* Predict density and weighted pressure */
+  const float w2 = -hydro_dimension * w1;
+  if (fabsf(w2) < 0.2f) {
+    const float expf_approx =
+        approx_expf(w2); /* 4th order expansion of exp(w) */
+    p->rho *= expf_approx;
+    p->pressure_bar *= expf_approx;
+  } else {
+    const float expf_exact = expf(w2);
+    p->rho *= expf_exact;
+    p->pressure_bar *= expf_exact;
+  }
+
+  /* Predict the internal energy */
+  p->u += p->u_dt * dt_therm;
+
+  /* Compute the new sound speed */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  p->force.soundspeed = soundspeed;
+}
+
+/**
+ * @brief Finishes the force calculation.
+ *
+ * Multiplies the force and accelerations by the appropriate constants
+ * and add the self-contribution term. In most cases, there is little
+ * to do here.
+ *
+ * Cosmological terms are also added/multiplied here.
+ *
+ * @param p The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void hydro_end_force(
+    struct part *restrict p, const struct cosmology *cosmo) {
+
+  p->force.h_dt *= p->h * hydro_dimension_inv;
+}
+
+/**
+ * @brief Kick the additional variables
+ *
+ * Additional hydrodynamic quantities are kicked forward in time here. These
+ * include thermal quantities (thermal energy or total energy or entropy, ...).
+ *
+ * @param p The particle to act upon.
+ * @param xp The particle extended data to act upon.
+ * @param dt_therm The time-step for this kick (for thermodynamic quantities).
+ * @param dt_grav The time-step for this kick (for gravity quantities).
+ * @param dt_hydro The time-step for this kick (for hydro quantities).
+ * @param dt_kick_corr The time-step for this kick (for gravity corrections).
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme
+ */
+__attribute__((always_inline)) INLINE static void hydro_kick_extra(
+    struct part *restrict p, struct xpart *restrict xp, float dt_therm,
+    float dt_grav, float dt_hydro, float dt_kick_corr,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Do not decrease the energy by more than a factor of 2*/
+  if (dt_therm > 0. && p->u_dt * dt_therm < -0.5f * xp->u_full) {
+    p->u_dt = -0.5f * xp->u_full / dt_therm;
+  }
+  xp->u_full += p->u_dt * dt_therm;
+
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u_dt = 0.f;
+  }
+
+  /* Compute the sound speed */
+  const float soundspeed = hydro_get_comoving_soundspeed(p);
+
+  p->force.soundspeed = soundspeed;
+}
+
+/**
+ * @brief Converts hydro quantity of a particle at the start of a run
+ *
+ * This function is called once at the end of the engine_init_particle()
+ * routine (at the start of a calculation) after the densities of
+ * particles have been computed.
+ * This can be used to convert internal energy into entropy for instance.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle to act upon
+ * @param cosmo The cosmological model.
+ * @param hydro_props The constants used in the scheme.
+ */
+__attribute__((always_inline)) INLINE static void hydro_convert_quantities(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
+
+  /* Convert the physical internal energy to the comoving one. */
+  /* u' = a^(3(g-1)) u */
+  const float factor = 1.f / cosmo->a_factor_internal_energy;
+  p->u *= factor;
+  xp->u_full = p->u;
+
+  /* Apply the minimal energy limit */
+  const float min_energy =
+      hydro_props->minimal_internal_energy / cosmo->a_factor_internal_energy;
+  if (xp->u_full < min_energy) {
+    xp->u_full = min_energy;
+    p->u = min_energy;
+    p->u_dt = 0.f;
+  }
+
+  /* Start out with a 'regular' AV for comparison to other schemes */
+  p->alpha = hydro_props->viscosity.alpha;
+
+  /* Note that unlike Minimal the pressure and sound speed cannot be calculated
+   * here because they are smoothed properties in this scheme. */
+}
+
+/**
+ * @brief Initialises the particles for the first time
+ *
+ * This function is called only once just after the ICs have been
+ * read in to do some conversions or assignments between the particle
+ * and extended particle fields.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ */
+__attribute__((always_inline)) INLINE static void hydro_first_init_part(
+    struct part *restrict p, struct xpart *restrict xp) {
+
+  p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
+  xp->v_full[0] = p->v[0];
+  xp->v_full[1] = p->v[1];
+  xp->v_full[2] = p->v[2];
+  xp->a_grav[0] = 0.f;
+  xp->a_grav[1] = 0.f;
+  xp->a_grav[2] = 0.f;
+  xp->u_full = p->u;
+
+  hydro_reset_acceleration(p);
+  hydro_init_part(p, NULL);
+}
+
+/**
+ * @brief Overwrite the initial internal energy of a particle.
+ *
+ * Note that in the cases where the thermodynamic variable is not
+ * internal energy but gets converted later, we must overwrite that
+ * field. The conversion to the actual variable happens later after
+ * the initial fake time-step.
+ *
+ * @param p The #part to write to.
+ * @param u_init The new initial internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_init_internal_energy(struct part *p, float u_init) {
+
+  p->u = u_init;
+}
+
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
+#endif /* SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_H */
diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_debug.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_debug.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0cd5367f94cd90f36cc2b738a63c7963adbd445
--- /dev/null
+++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_debug.h
@@ -0,0 +1,48 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_DEBUG_H
+#define SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_DEBUG_H
+/**
+ * @file PressureEnergyMorrisMonaghanAV/hydro_debug.h
+ * @brief P-U conservative implementation of SPH (Debugging routines)
+ *
+ * The thermal variable is the internal energy (u). A simple variable
+ * viscosity term (Morris & Monaghan 1997) with a Balsara switch is
+ * implemented.
+ */
+
+__attribute__((always_inline)) INLINE static void hydro_debug_particle(
+    const struct part* p, const struct xpart* xp) {
+  /* Dump the full hydro state of one particle (position, kinematics,
+   * thermodynamics, smoothing data and viscosity alpha) to stdout. */
+  printf(
+      "x=[%.3e,%.3e,%.3e], "
+      "v=[%.3e,%.3e,%.3e],v_full=[%.3e,%.3e,%.3e] \n a=[%.3e,%.3e,%.3e], "
+      "u=%.3e, du/dt=%.3e v_sig=%.3e, P=%.3e\n"
+      "h=%.3e, dh/dt=%.3e wcount=%d, m=%.3e, dh_drho=%.3e, rho=%.3e, \n"
+      "p_dh=%.3e, p_bar=%.3e \n"
+      "time_bin=%d, wakeup=%d alpha=%.3e\n",
+      p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
+      xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
+      p->u, p->u_dt, p->force.v_sig, hydro_get_comoving_pressure(p), p->h,
+      p->force.h_dt, (int)p->density.wcount, p->mass, p->density.rho_dh, p->rho,
+      p->density.pressure_bar_dh, p->pressure_bar, p->time_bin, p->wakeup,
+      p->alpha);
+}
+
+#endif /* SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_DEBUG_H */
diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_iact.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..69da511c7544a71ef381a0889c8b56c80d5211f1
--- /dev/null
+++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_iact.h
@@ -0,0 +1,451 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IACT_H
+#define SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IACT_H
+
+/**
+ * @file PressureEnergyMorrisMonaghanAV/hydro_iact.h
+ * @brief P-U implementation of SPH (Neighbour loop equations)
+ *
+ * The thermal variable is the internal energy (u). A simple variable
+ * viscosity term (Morris & Monaghan 1997) with a Balsara switch is
+ * implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "adiabatic_index.h"
+#include "minmax.h"
+
+/**
+ * @brief Density interaction between two particles.
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_density(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    struct part* pj, float a, float H) {
+
+  float wi, wj, wi_dx, wj_dx;
+  float dv[3], curlvr[3];
+
+  const float r = sqrtf(r2);
+
+  /* Get the masses. */
+  const float mi = pi->mass;
+  const float mj = pj->mass;
+
+  /* Compute density of pi. */
+  const float hi_inv = 1.f / hi;
+  const float ui = r * hi_inv;
+
+  kernel_deval(ui, &wi, &wi_dx);
+
+  pi->rho += mj * wi;
+  pi->density.rho_dh -= mj * (hydro_dimension * wi + ui * wi_dx);
+
+  /* Smoothed (energy-weighted) pressure and its derivative w.r.t. h. */
+  pi->pressure_bar += mj * wi * pj->u;
+  pi->density.pressure_bar_dh -=
+      mj * pj->u * (hydro_dimension * wi + ui * wi_dx);
+  pi->density.wcount += wi;
+  pi->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  /* Compute density of pj. */
+  const float hj_inv = 1.f / hj;
+  const float uj = r * hj_inv;
+  kernel_deval(uj, &wj, &wj_dx);
+
+  pj->rho += mi * wj;
+  pj->density.rho_dh -= mi * (hydro_dimension * wj + uj * wj_dx);
+  pj->pressure_bar += mi * wj * pi->u;
+  pj->density.pressure_bar_dh -=
+      mi * pi->u * (hydro_dimension * wj + uj * wj_dx);
+  pj->density.wcount += wj;
+  pj->density.wcount_dh -= (hydro_dimension * wj + uj * wj_dx);
+
+  /* Now we need to compute the div terms */
+  const float r_inv = 1.f / r;
+  const float faci = mj * wi_dx * r_inv;
+  const float facj = mi * wj_dx * r_inv;
+
+  /* Compute dv dot r */
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  /* Accumulate the kernel-weighted velocity divergence estimate. */
+  pi->density.div_v -= faci * dvdr;
+  pj->density.div_v -= facj * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
+
+  /* Negative because of the change in sign of dx & dv. */
+  pj->density.rot_v[0] += facj * curlvr[0];
+  pj->density.rot_v[1] += facj * curlvr[1];
+  pj->density.rot_v[2] += facj * curlvr[2];
+}
+
+/**
+ * @brief Density interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_density(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    const struct part* pj, float a, float H) {
+
+  float wi, wi_dx;
+  float dv[3], curlvr[3];
+
+  /* Get the masses. */
+  const float mj = pj->mass;
+
+  /* Get r and r inverse. */
+  const float r = sqrtf(r2);
+
+  const float h_inv = 1.f / hi;
+  const float ui = r * h_inv;
+  kernel_deval(ui, &wi, &wi_dx);
+
+  pi->rho += mj * wi;
+  pi->density.rho_dh -= mj * (hydro_dimension * wi + ui * wi_dx);
+
+  /* Smoothed (energy-weighted) pressure and its derivative w.r.t. h. */
+  pi->pressure_bar += mj * wi * pj->u;
+
+  pi->density.pressure_bar_dh -=
+      mj * pj->u * (hydro_dimension * wi + ui * wi_dx);
+  pi->density.wcount += wi;
+  pi->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  const float r_inv = 1.f / r;
+  const float faci = mj * wi_dx * r_inv;
+
+  /* Compute dv dot r */
+  dv[0] = pi->v[0] - pj->v[0];
+  dv[1] = pi->v[1] - pj->v[1];
+  dv[2] = pi->v[2] - pj->v[2];
+  const float dvdr = dv[0] * dx[0] + dv[1] * dx[1] + dv[2] * dx[2];
+
+  /* Accumulate the kernel-weighted velocity divergence estimate. */
+  pi->density.div_v -= faci * dvdr;
+
+  /* Compute dv cross r */
+  curlvr[0] = dv[1] * dx[2] - dv[2] * dx[1];
+  curlvr[1] = dv[2] * dx[0] - dv[0] * dx[2];
+  curlvr[2] = dv[0] * dx[1] - dv[1] * dx[0];
+
+  pi->density.rot_v[0] += faci * curlvr[0];
+  pi->density.rot_v[1] += faci * curlvr[1];
+  pi->density.rot_v[2] += faci * curlvr[2];
+}
+
+/**
+ * @brief Force interaction between two particles.
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_force(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    struct part* pj, float a, float H) {
+
+  /* Cosmological factors entering the EoMs */
+  const float fac_mu = pow_three_gamma_minus_five_over_two(a);
+  const float a2_Hubble = a * a * H;
+
+  const float r = sqrtf(r2);
+  const float r_inv = 1.0f / r;
+
+  /* Recover some data */
+  const float mj = pj->mass;
+  const float mi = pi->mass;
+
+  const float miui = mi * pi->u;
+  const float mjuj = mj * pj->u;
+
+  const float rhoi = pi->rho;
+  const float rhoj = pj->rho;
+  /* Compute gradient terms ("grad-h" corrections, f stored in force.f) */
+  const float f_ij = 1.f - (pi->force.f / mjuj);
+  const float f_ji = 1.f - (pj->force.f / miui);
+
+  /* Get the kernel for hi. */
+  const float hi_inv = 1.0f / hi;
+  const float hid_inv = pow_dimension_plus_one(hi_inv); /* 1/h^(d+1) */
+  const float xi = r * hi_inv;
+  float wi, wi_dx;
+  kernel_deval(xi, &wi, &wi_dx);
+  const float wi_dr = hid_inv * wi_dx;
+
+  /* Get the kernel for hj. */
+  const float hj_inv = 1.0f / hj;
+  const float hjd_inv = pow_dimension_plus_one(hj_inv); /* 1/h^(d+1) */
+  const float xj = r * hj_inv;
+  float wj, wj_dx;
+  kernel_deval(xj, &wj, &wj_dx);
+  const float wj_dr = hjd_inv * wj_dx;
+
+  /* Compute dv dot r. */
+  const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
+                     (pi->v[1] - pj->v[1]) * dx[1] +
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; used for the viscosity below but NOT for
+   * the SPH work term in du/dt. */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
+
+  /* Are the particles moving towards each other? */
+  const float omega_ij = min(dvdr_Hubble, 0.f);
+  const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
+
+  /* Compute sound speeds and signal velocity */
+  const float ci = pi->force.soundspeed;
+  const float cj = pj->force.soundspeed;
+  const float v_sig = ci + cj - 3.f * mu_ij;
+
+  /* Balsara term */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
+
+  /* Construct the full viscosity term; alpha is the pairwise average of the
+   * two particles' individual viscosity parameters. */
+  const float rho_ij = 0.5f * (rhoi + rhoj);
+  const float alpha = 0.5f * (pi->alpha + pj->alpha);
+  const float visc =
+      -0.25f * alpha * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
+
+  /* Convolve with the kernel */
+  const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
+
+  /* SPH acceleration term */
+  const float sph_acc_term =
+      pj->u * pi->u * hydro_gamma_minus_one * hydro_gamma_minus_one *
+      ((f_ij / pi->pressure_bar) * wi_dr + (f_ji / pj->pressure_bar) * wj_dr) *
+      r_inv;
+
+  /* Assemble the acceleration */
+  const float acc = sph_acc_term + visc_acc_term;
+
+  /* Use the force Luke ! */
+  pi->a_hydro[0] -= mj * acc * dx[0];
+  pi->a_hydro[1] -= mj * acc * dx[1];
+  pi->a_hydro[2] -= mj * acc * dx[2];
+
+  pj->a_hydro[0] += mi * acc * dx[0];
+  pj->a_hydro[1] += mi * acc * dx[1];
+  pj->a_hydro[2] += mi * acc * dx[2];
+
+  /* Get the time derivative for u. */
+  const float sph_du_term_i = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pj->u * pi->u * (f_ij / pi->pressure_bar) *
+                              wi_dr * dvdr * r_inv;
+  const float sph_du_term_j = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pi->u * pj->u * (f_ji / pj->pressure_bar) *
+                              wj_dr * dvdr * r_inv;
+
+  /* Viscosity term (heating is shared equally between the pair) */
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
+
+  /* Assemble the energy equation term */
+  const float du_dt_i = sph_du_term_i + visc_du_term;
+  const float du_dt_j = sph_du_term_j + visc_du_term;
+
+  /* Internal energy time derivative */
+  pi->u_dt += du_dt_i * mj;
+  pj->u_dt += du_dt_j * mi;
+
+  /* Get the time derivative for h. */
+  pi->force.h_dt -= mj * dvdr * r_inv / rhoj * wi_dr;
+  pj->force.h_dt -= mi * dvdr * r_inv / rhoi * wj_dr;
+
+  /* Update the signal velocity. */
+  pi->force.v_sig = max(pi->force.v_sig, v_sig);
+  pj->force.v_sig = max(pj->force.v_sig, v_sig);
+}
+
+/**
+ * @brief Force interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
+    float r2, const float* dx, float hi, float hj, struct part* pi,
+    const struct part* pj, float a, float H) {
+
+  /* Cosmological factors entering the EoMs */
+  const float fac_mu = pow_three_gamma_minus_five_over_two(a);
+  const float a2_Hubble = a * a * H;
+
+  const float r = sqrtf(r2);
+  const float r_inv = 1.0f / r;
+
+  /* Recover some data */
+  const float mj = pj->mass;
+  const float mi = pi->mass;
+
+  const float miui = mi * pi->u;
+  const float mjuj = mj * pj->u;
+
+  const float rhoi = pi->rho;
+  const float rhoj = pj->rho;
+  /* Compute gradient terms ("grad-h" corrections, f stored in force.f) */
+  const float f_ij = 1.f - (pi->force.f / mjuj);
+  const float f_ji = 1.f - (pj->force.f / miui);
+
+  /* Get the kernel for hi. */
+  const float hi_inv = 1.0f / hi;
+  const float hid_inv = pow_dimension_plus_one(hi_inv); /* 1/h^(d+1) */
+  const float xi = r * hi_inv;
+  float wi, wi_dx;
+  kernel_deval(xi, &wi, &wi_dx);
+  const float wi_dr = hid_inv * wi_dx;
+
+  /* Get the kernel for hj. */
+  const float hj_inv = 1.0f / hj;
+  const float hjd_inv = pow_dimension_plus_one(hj_inv); /* 1/h^(d+1) */
+  const float xj = r * hj_inv;
+  float wj, wj_dx;
+  kernel_deval(xj, &wj, &wj_dx);
+  const float wj_dr = hjd_inv * wj_dx;
+
+  /* Compute dv dot r. */
+  const float dvdr = (pi->v[0] - pj->v[0]) * dx[0] +
+                     (pi->v[1] - pj->v[1]) * dx[1] +
+                     (pi->v[2] - pj->v[2]) * dx[2];
+
+  /* Includes the hubble flow term; used for the viscosity below but NOT for
+   * the SPH work term in du/dt. */
+  const float dvdr_Hubble = dvdr + a2_Hubble * r2;
+
+  /* Are the particles moving towards each other? */
+  const float omega_ij = min(dvdr_Hubble, 0.f);
+  const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
+
+  /* Compute sound speeds and signal velocity */
+  const float ci = pi->force.soundspeed;
+  const float cj = pj->force.soundspeed;
+  const float v_sig = ci + cj - 3.f * mu_ij;
+
+  /* Balsara term */
+  const float balsara_i = pi->force.balsara;
+  const float balsara_j = pj->force.balsara;
+
+  /* Construct the full viscosity term; alpha is the pairwise average of the
+   * two particles' individual viscosity parameters. */
+  const float rho_ij = 0.5f * (rhoi + rhoj);
+  const float alpha = 0.5f * (pi->alpha + pj->alpha);
+  const float visc =
+      -0.25f * alpha * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
+
+  /* Convolve with the kernel */
+  const float visc_acc_term = 0.5f * visc * (wi_dr + wj_dr) * r_inv;
+
+  /* SPH acceleration term */
+  const float sph_acc_term =
+      pj->u * pi->u * hydro_gamma_minus_one * hydro_gamma_minus_one *
+      ((f_ij / pi->pressure_bar) * wi_dr + (f_ji / pj->pressure_bar) * wj_dr) *
+      r_inv;
+
+  /* Assemble the acceleration */
+  const float acc = sph_acc_term + visc_acc_term;
+
+  /* Use the force Luke ! (only pi is updated in the non-symmetric version) */
+  pi->a_hydro[0] -= mj * acc * dx[0];
+  pi->a_hydro[1] -= mj * acc * dx[1];
+  pi->a_hydro[2] -= mj * acc * dx[2];
+
+  /* Get the time derivative for u. */
+  const float sph_du_term_i = hydro_gamma_minus_one * hydro_gamma_minus_one *
+                              pj->u * pi->u * (f_ij / pi->pressure_bar) *
+                              wi_dr * dvdr * r_inv;
+
+  /* Viscosity term */
+  const float visc_du_term = 0.5f * visc_acc_term * dvdr_Hubble;
+
+  /* Assemble the energy equation term */
+  const float du_dt_i = sph_du_term_i + visc_du_term;
+
+  /* Internal energy time derivative */
+  pi->u_dt += du_dt_i * mj;
+
+  /* Get the time derivative for h. */
+  pi->force.h_dt -= mj * dvdr * r_inv / rhoj * wi_dr;
+
+  /* Update the signal velocity. */
+  pi->force.v_sig = max(pi->force.v_sig, v_sig);
+}
+
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Nothing to do here: the symmetric version only runs when both particles
+   * are active, so neither needs waking up. */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float* dx, float hi, float hj, struct part* restrict pi,
+    struct part* restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? Flag pj if the active particle's signal velocity
+   * exceeds the neighbour's by more than the allowed ratio. */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
+#endif /* SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IACT_H */
diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..71662f14c61c92d65bcf493b6f5a43b8172e3697
--- /dev/null
+++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h
@@ -0,0 +1,209 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IO_H
+#define SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IO_H
+/**
+ * @file PressureEnergyMorrisMonaghanAV/hydro_io.h
+ * @brief P-U implementation of SPH (i/o routines)
+ *
+ * The thermal variable is the internal energy (u). A simple variable
+ * viscosity term (Morris & Monaghan 1997) with a Balsara switch is
+ * implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "adiabatic_index.h"
+#include "hydro.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
+/**
+ * @brief Specifies which particle fields to read from a dataset
+ *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
+ */
+INLINE static void hydro_read_particles(struct part* parts,
+                                        struct io_props* list,
+                                        int* num_fields) {
+
+  /* Number of entries filled in 'list' below. */
+  *num_fields = 8;
+
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, parts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                parts, mass);
+  list[3] = io_make_input_field("SmoothingLength", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_LENGTH, parts, h);
+  list[4] = io_make_input_field("InternalEnergy", FLOAT, 1, COMPULSORY,
+                                UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, parts, id);
+  /* The last two are OPTIONAL: absent from most ICs. */
+  list[6] = io_make_input_field("Accelerations", FLOAT, 3, OPTIONAL,
+                                UNIT_CONV_ACCELERATION, parts, a_hydro);
+  list[7] = io_make_input_field("Density", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_DENSITY, parts, rho);
+}
+
+INLINE static void convert_S(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  /* i/o conversion helper: entropy is not carried by the particle in this
+   * scheme, so derive it on the fly for output. */
+  ret[0] = hydro_get_comoving_entropy(p);
+}
+
+INLINE static void convert_P(const struct engine* e, const struct part* p,
+                             const struct xpart* xp, float* ret) {
+
+  /* i/o conversion helper: derive the comoving pressure for output. */
+  ret[0] = hydro_get_comoving_pressure(p);
+}
+
+INLINE static void convert_part_pos(const struct engine* e,
+                                    const struct part* p,
+                                    const struct xpart* xp, double* ret) {
+
+  /* Copy the raw position; for periodic runs, fold it back into the box
+   * first so snapshots never contain out-of-box coordinates. */
+  if (!e->s->periodic) {
+    ret[0] = p->x[0];
+    ret[1] = p->x[1];
+    ret[2] = p->x[2];
+  } else {
+    ret[0] = box_wrap(p->x[0], 0.0, e->s->dim[0]);
+    ret[1] = box_wrap(p->x[1], 0.0, e->s->dim[1]);
+    ret[2] = box_wrap(p->x[2], 0.0, e->s->dim[2]);
+  }
+}
+
+INLINE static void convert_part_vel(const struct engine* e,
+                                    const struct part* p,
+                                    const struct xpart* xp, float* ret) {
+
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const struct cosmology* cosmo = e->cosmology;
+  const integertime_t ti_current = e->ti_current;
+  const double time_base = e->time_base;
+
+  const integertime_t ti_beg = get_integer_time_begin(ti_current, p->time_bin);
+  const integertime_t ti_end = get_integer_time_end(ti_current, p->time_bin);
+
+  /* Get time-step since the last kick. NOTE(review): (ti_beg + ti_end) / 2 is
+   * taken to be the time of the last half-kick — confirm against kick task. */
+  float dt_kick_grav, dt_kick_hydro;
+  if (with_cosmology) {
+    dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, ti_beg, ti_current);
+    dt_kick_grav -=
+        cosmology_get_grav_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2);
+    dt_kick_hydro = cosmology_get_hydro_kick_factor(cosmo, ti_beg, ti_current);
+    dt_kick_hydro -=
+        cosmology_get_hydro_kick_factor(cosmo, ti_beg, (ti_beg + ti_end) / 2);
+  } else {
+    dt_kick_grav = (ti_current - ((ti_beg + ti_end) / 2)) * time_base;
+    dt_kick_hydro = (ti_current - ((ti_beg + ti_end) / 2)) * time_base;
+  }
+
+  /* Extrapolate the velocites to the current time */
+  hydro_get_drifted_velocities(p, xp, dt_kick_hydro, dt_kick_grav, ret);
+
+  /* Conversion from internal units to peculiar velocities */
+  ret[0] *= cosmo->a_inv;
+  ret[1] *= cosmo->a_inv;
+  ret[2] *= cosmo->a_inv;
+}
+
+INLINE static void convert_part_potential(const struct engine* e,
+                                          const struct part* p,
+                                          const struct xpart* xp, float* ret) {
+  /* Particles without an attached gravity part report a zero potential. */
+  if (p->gpart != NULL)
+    ret[0] = gravity_get_comoving_potential(p->gpart);
+  else
+    ret[0] = 0.f;
+}
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
+ */
+INLINE static void hydro_write_particles(const struct part* parts,
+                                         const struct xpart* xparts,
+                                         struct io_props* list,
+                                         int* num_fields) {
+
+  /* Number of entries filled in 'list' below. */
+  *num_fields = 11;
+
+  /* List what we want to write */
+  list[0] = io_make_output_field_convert_part("Coordinates", DOUBLE, 3,
+                                              UNIT_CONV_LENGTH, parts, xparts,
+                                              convert_part_pos);
+  list[1] = io_make_output_field_convert_part(
+      "Velocities", FLOAT, 3, UNIT_CONV_SPEED, parts, xparts, convert_part_vel);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, parts, mass);
+  list[3] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 parts, h);
+  list[4] = io_make_output_field("InternalEnergy", FLOAT, 1,
+                                 UNIT_CONV_ENERGY_PER_UNIT_MASS, parts, u);
+  list[5] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+  list[6] =
+      io_make_output_field("Density", FLOAT, 1, UNIT_CONV_DENSITY, parts, rho);
+  /* In P-U the stored pressure is the smoothed pressure_bar. */
+  list[7] = io_make_output_field("Pressure", FLOAT, 1, UNIT_CONV_PRESSURE,
+                                 parts, pressure_bar);
+  list[8] = io_make_output_field_convert_part("Entropy", FLOAT, 1,
+                                              UNIT_CONV_ENTROPY_PER_UNIT_MASS,
+                                              parts, xparts, convert_S);
+  /* The individual Morris & Monaghan viscosity parameter alpha. */
+  list[9] = io_make_output_field("Viscosity", FLOAT, 1, UNIT_CONV_NO_UNITS,
+                                 parts, alpha);
+  list[10] = io_make_output_field_convert_part("Potential", FLOAT, 1,
+                                               UNIT_CONV_POTENTIAL, parts,
+                                               xparts, convert_part_potential);
+}
+
+/**
+ * @brief Writes the current model of SPH to the file
+ * @param h_grpsph The HDF5 group in which to write
+ */
+INLINE static void hydro_write_flavour(hid_t h_grpsph) {
+
+  /* Viscosity and thermal conduction models */
+  io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment");
+  io_write_attribute_s(h_grpsph, "Viscosity Model",
+                       "Variable viscosity as in Morris and Monaghan (1997)");
+
+  /* Time integration properties */
+  io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt",
+                       const_max_u_change);
+}
+
+/**
+ * @brief Are we writing entropy in the internal energy field ?
+ *
+ * This scheme evolves u directly, so the field really is internal energy.
+ *
+ * @return 1 if entropy is in 'internal energy', 0 otherwise.
+ */
+INLINE static int writeEntropyFlag(void) { return 0; }
+
+#endif /* SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_IO_H */
diff --git a/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_part.h b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_part.h
new file mode 100644
index 0000000000000000000000000000000000000000..ecd20938456b04004ed2299fbe1de0c1b8bb50d6
--- /dev/null
+++ b/src/hydro/PressureEnergyMorrisMonaghanAV/hydro_part.h
@@ -0,0 +1,198 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk) &
+ *                    Josh Borrow (joshua.borrow@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_PART_H
+#define SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_PART_H
+/**
+ * @file PressureEnergyMorrisMonaghanAV/hydro_part.h
+ * @brief P-U implementation of SPH (Particle definition)
+ *
+ * The thermal variable is the internal energy (u). A simple variable
+ * viscosity term (Morris & Monaghan 1997) with a Balsara switch is
+ * implemented.
+ *
+ * No thermal conduction term is implemented.
+ *
+ * See PressureEnergy/hydro.h for references.
+ */
+
+#include "chemistry_struct.h"
+#include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Particle fields not needed during the SPH loops over neighbours.
+ *
+ * This structure contains the particle fields that are not used in the
+ * density or force loops. Quantities should be used in the kick, drift and
+ * potentially ghost tasks only.
+ */
+struct xpart {
+
+  /*! Offset between current position and position at last tree rebuild. */
+  float x_diff[3];
+
+  /*! Offset between the current position and position at the last sort. */
+  float x_diff_sort[3];
+
+  /*! Velocity at the last full step. */
+  float v_full[3];
+
+  /*! Gravitational acceleration at the last full step. */
+  float a_grav[3];
+
+  /*! Internal energy at the last full step (the kicked value of u). */
+  float u_full;
+
+  /*! Additional data used to record cooling information */
+  struct cooling_xpart_data cooling_data;
+
+  /*! Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
+} SWIFT_STRUCT_ALIGN;
+
+/**
+ * @brief Particle fields for the SPH particles
+ *
+ * The density and force substructures are used to contain variables only used
+ * within the density and force loops over neighbours. All more permanent
+ * variables should be declared in the main part of the part structure,
+ */
+struct part {
+
+  /*! Particle unique ID. */
+  long long id;
+
+  /*! Pointer to corresponding gravity part. */
+  struct gpart* gpart;
+
+  /*! Particle position. */
+  double x[3];
+
+  /*! Particle predicted velocity. */
+  float v[3];
+
+  /*! Particle acceleration. */
+  float a_hydro[3];
+
+  /*! Particle mass. */
+  float mass;
+
+  /*! Particle smoothing length. */
+  float h;
+
+  /*! Particle internal energy. */
+  float u;
+
+  /*! Time derivative of the internal energy. */
+  float u_dt;
+
+  /*! Particle density. */
+  float rho;
+
+  /*! Particle pressure (weighted), i.e. the smoothed pressure of P-U SPH */
+  float pressure_bar;
+
+  /*! Artificial viscosity parameter (Morris & Monaghan 1997, per-particle) */
+  float alpha;
+
+  /* Store density/force specific stuff. The union saves memory: the density
+   * and force sub-structures are never live at the same time. */
+  union {
+
+    /**
+     * @brief Structure for the variables only used in the density loop over
+     * neighbours.
+     *
+     * Quantities in this sub-structure should only be accessed in the density
+     * loop over neighbours and the ghost task.
+     */
+    struct {
+
+      /*! Neighbour number count. */
+      float wcount;
+
+      /*! Derivative of the neighbour number with respect to h. */
+      float wcount_dh;
+
+      /*! Derivative of density with respect to h */
+      float rho_dh;
+
+      /*! Derivative of the weighted pressure with respect to h */
+      float pressure_bar_dh;
+
+      /*! Particle velocity curl. */
+      float rot_v[3];
+
+      /*! Particle velocity divergence. */
+      float div_v;
+    } density;
+
+    /**
+     * @brief Structure for the variables only used in the force loop over
+     * neighbours.
+     *
+     * Quantities in this sub-structure should only be accessed in the force
+     * loop over neighbours and the ghost, drift and kick tasks.
+     */
+    struct {
+
+      /*! "Grad h" term -- only partial in P-U */
+      float f;
+
+      /*! Particle soundspeed. */
+      float soundspeed;
+
+      /*! Particle signal velocity */
+      float v_sig;
+
+      /*! Time derivative of smoothing length  */
+      float h_dt;
+
+      /*! Balsara switch */
+      float balsara;
+    } force;
+  };
+
+  /* Chemistry information */
+  struct chemistry_part_data chemistry_data;
+
+  /*! Time-step length */
+  timebin_t time_bin;
+
+  /* Set by the time-step limiter when this particle needs waking up */
+  timebin_t wakeup;
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Time of the last drift */
+  integertime_t ti_drift;
+
+  /* Time of the last kick */
+  integertime_t ti_kick;
+
+#endif
+
+} SWIFT_STRUCT_ALIGN;
+
+#endif /* SWIFT_PRESSURE_ENERGY_MORRIS_HYDRO_PART_H */
diff --git a/src/hydro/PressureEntropy/hydro.h b/src/hydro/PressureEntropy/hydro.h
index 3f0f9931ebd557fffcab7e89f3c6297c2fb26474..40b3f42eaed7cbff3c6503caa0fc8801d65ac8e3 100644
--- a/src/hydro/PressureEntropy/hydro.h
+++ b/src/hydro/PressureEntropy/hydro.h
@@ -42,26 +42,58 @@
 #include "minmax.h"
 
 /**
- * @brief Returns the comoving internal energy of a particle
+ * @brief Returns the comoving internal energy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float
-hydro_get_comoving_internal_energy(const struct part *restrict p) {
+hydro_get_comoving_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp) {
 
-  return gas_internal_energy_from_entropy(p->rho_bar, p->entropy);
+  return gas_internal_energy_from_entropy(p->rho_bar, xp->entropy_full);
 }
 
 /**
- * @brief Returns the physical internal energy of a particle
+ * @brief Returns the physical internal energy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float
 hydro_get_physical_internal_energy(const struct part *restrict p,
+                                   const struct xpart *restrict xp,
                                    const struct cosmology *cosmo) {
 
+  return gas_internal_energy_from_entropy(p->rho_bar * cosmo->a3_inv,
+                                          xp->entropy_full);
+}
+/**
+ * @brief Returns the comoving internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_internal_energy(const struct part *restrict p) {
+
+  return gas_internal_energy_from_entropy(p->rho_bar, p->entropy);
+}
+
+/**
+ * @brief Returns the physical internal energy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_internal_energy(const struct part *restrict p,
+                                           const struct cosmology *cosmo) {
+
   return gas_internal_energy_from_entropy(p->rho_bar * cosmo->a3_inv,
                                           p->entropy);
 }
@@ -89,24 +121,57 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_pressure(
 }
 
 /**
- * @brief Returns the comoving entropy of a particle
+ * @brief Returns the comoving entropy of a particle at the last
+ * time the particle was kicked.
  *
- * @param p The particle of interest
+ * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_comoving_entropy(
-    const struct part *restrict p) {
+    const struct part *restrict p, const struct xpart *restrict xp) {
 
-  return p->entropy;
+  return xp->entropy_full;
 }
 
 /**
- * @brief Returns the physical entropy of a particle
+ * @brief Returns the physical entropy of a particle at the last
+ * time the particle was kicked.
  *
  * @param p The particle of interest.
+ * @param xp The extended data of the particle of interest.
  * @param cosmo The cosmological model.
  */
 __attribute__((always_inline)) INLINE static float hydro_get_physical_entropy(
-    const struct part *restrict p, const struct cosmology *cosmo) {
+    const struct part *restrict p, const struct xpart *restrict xp,
+    const struct cosmology *cosmo) {
+
+  /* Note: no cosmological conversion required here with our choice of
+   * coordinates. */
+  return xp->entropy_full;
+}
+
+/**
+ * @brief Returns the comoving entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_comoving_entropy(const struct part *restrict p) {
+
+  return p->entropy;
+}
+
+/**
+ * @brief Returns the physical entropy of a particle drifted to the
+ * current time.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_drifted_physical_entropy(const struct part *restrict p,
+                                   const struct cosmology *cosmo) {
 
   /* Note: no cosmological conversion required here with our choice of
    * coordinates. */
@@ -210,12 +275,28 @@ __attribute__((always_inline)) INLINE static void hydro_get_drifted_velocities(
  *
  * @param p The particle of interest
  */
-__attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
-    const struct part *restrict p) {
+__attribute__((always_inline)) INLINE static float
+hydro_get_comoving_internal_energy_dt(const struct part *restrict p) {
 
   return gas_internal_energy_from_entropy(p->rho_bar, p->entropy_dt);
 }
 
+/**
+ * @brief Returns the time derivative of physical internal energy of a particle
+ *
+ * We assume a constant density.
+ *
+ * @param p The particle of interest.
+ * @param cosmo The cosmological model.
+ */
+__attribute__((always_inline)) INLINE static float
+hydro_get_physical_internal_energy_dt(const struct part *restrict p,
+                                      const struct cosmology *cosmo) {
+
+  return gas_internal_energy_from_entropy(p->rho_bar * cosmo->a3_inv,
+                                          p->entropy_dt);
+}
+
 /**
  * @brief Returns the time derivative of internal energy of a particle
  *
@@ -224,12 +305,44 @@ __attribute__((always_inline)) INLINE static float hydro_get_internal_energy_dt(
  * @param p The particle of interest.
  * @param du_dt The new time derivative of the internal energy.
  */
-__attribute__((always_inline)) INLINE static void hydro_set_internal_energy_dt(
-    struct part *restrict p, float du_dt) {
+__attribute__((always_inline)) INLINE static void
+hydro_set_comoving_internal_energy_dt(struct part *restrict p, float du_dt) {
 
   p->entropy_dt = gas_entropy_from_internal_energy(p->rho_bar, du_dt);
 }
 
+/**
+ * @brief Sets the time derivative of the physical internal energy of a particle
+ *
+ * We assume a constant density for the conversion to entropy.
+ *
+ * @param p The particle of interest.
+ * @param cosmo Cosmology data structure
+ * @param du_dt The time derivative of the internal energy.
+ */
+__attribute__((always_inline)) INLINE static void
+hydro_set_physical_internal_energy_dt(struct part *restrict p,
+                                      const struct cosmology *restrict cosmo,
+                                      float du_dt) {
+  p->entropy_dt =
+      gas_entropy_from_internal_energy(p->rho_bar * cosmo->a3_inv, du_dt);
+}
+/**
+ * @brief Sets the physical entropy of a particle
+ *
+ * @param p The particle of interest.
+ * @param xp The extended particle data.
+ * @param cosmo Cosmology data structure
+ * @param entropy The physical entropy
+ */
+__attribute__((always_inline)) INLINE static void hydro_set_physical_entropy(
+    struct part *p, struct xpart *xp, const struct cosmology *cosmo,
+    const float entropy) {
+
+  /* Note there is no conversion from physical to comoving entropy */
+  xp->entropy_full = entropy;
+}
+
 /**
  * @brief Computes the hydro time-step of a given particle
  *
@@ -370,15 +483,24 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
 /**
  * @brief Prepare a particle for the force calculation.
  *
- * Computes viscosity term, conduction term and smoothing length gradient terms.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
  * @param p The particle to act upon
  * @param xp The extended particle data to act upon
  * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const float dt_alpha) {
 
   const float fac_mu = cosmo->a_factor_mu;
 
@@ -398,7 +520,8 @@ __attribute__((always_inline)) INLINE static void hydro_prepare_force(
 
   /* Compute the Balsara switch */
   const float balsara =
-      abs_div_v / (abs_div_v + curl_v + 0.0001f * soundspeed * fac_mu / p->h);
+      hydro_props->viscosity.alpha * abs_div_v /
+      (abs_div_v + curl_v + 0.0001f * soundspeed * fac_mu / p->h);
 
   /* Divide the pressure by the density squared to get the SPH term */
   const float rho_bar_inv = 1.f / p->rho_bar;
@@ -587,7 +710,7 @@ __attribute__((always_inline)) INLINE static void hydro_kick_extra(
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
     struct part *restrict p, struct xpart *restrict xp,
-    const struct cosmology *cosmo) {
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props) {
 
   /* We read u in the entropy field. We now get S from u */
   xp->entropy_full =
@@ -622,6 +745,7 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
     struct part *restrict p, struct xpart *restrict xp) {
 
   p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
   p->rho_bar = 0.f;
   p->entropy_one_over_gamma = pow_one_over_gamma(p->entropy);
   xp->v_full[0] = p->v[0];
@@ -652,4 +776,14 @@ hydro_set_init_internal_energy(struct part *p, float u_init) {
   p->entropy = u_init;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part *p, const struct xpart *xp) {}
+
 #endif /* SWIFT_PRESSURE_ENTROPY_HYDRO_H */
diff --git a/src/hydro/PressureEntropy/hydro_debug.h b/src/hydro/PressureEntropy/hydro_debug.h
index 14d69bb650ff1bbd49394c0ca2f6256ad0cb188d..2163b70b94dde4e88f010d962358dccbde7960a3 100644
--- a/src/hydro/PressureEntropy/hydro_debug.h
+++ b/src/hydro/PressureEntropy/hydro_debug.h
@@ -36,14 +36,14 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "v=[%.3e,%.3e,%.3e],v_full=[%.3e,%.3e,%.3e] \n a=[%.3e,%.3e,%.3e],\n "
       "h=%.3e, wcount=%.3f, wcount_dh=%.3e, m=%.3e, dh_drho=%.3e, rho=%.3e, "
       "rho_bar=%.3e, P=%.3e, dP_dh=%.3e, P_over_rho2=%.3e, S=%.3e, S^1/g=%.3e, "
-      "dS/dt=%.3e,\nc=%.3e v_sig=%e dh/dt=%.3e time_bin=%d\n",
+      "dS/dt=%.3e,\nc=%.3e v_sig=%e dh/dt=%.3e time_bin=%d wakeup=%d\n",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], xp->v_full[0],
       xp->v_full[1], xp->v_full[2], p->a_hydro[0], p->a_hydro[1], p->a_hydro[2],
       p->h, p->density.wcount, p->density.wcount_dh, p->mass, p->density.rho_dh,
       p->rho, p->rho_bar, hydro_get_comoving_pressure(p),
       p->density.pressure_dh, p->force.P_over_rho2, p->entropy,
       p->entropy_one_over_gamma, p->entropy_dt, p->force.soundspeed,
-      p->force.v_sig, p->force.h_dt, p->time_bin);
+      p->force.v_sig, p->force.h_dt, p->time_bin, p->wakeup);
 }
 
 #endif /* SWIFT_PRESSURE_ENTROPY_HYDRO_DEBUG_H */
diff --git a/src/hydro/PressureEntropy/hydro_iact.h b/src/hydro/PressureEntropy/hydro_iact.h
index b8f8c1983a3b1fb67781f7228194deb770273988..19279adec1f37117cf985e63a18a681ceee4f973 100644
--- a/src/hydro/PressureEntropy/hydro_iact.h
+++ b/src/hydro/PressureEntropy/hydro_iact.h
@@ -259,12 +259,11 @@ __attribute__((always_inline)) INLINE static void runner_iact_force(
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Signal velocity */
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Now construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Now, convolve with the kernel */
   const float visc_term = 0.5f * visc * (wi_dr + wj_dr);
@@ -373,12 +372,11 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   const float mu_ij = fac_mu * r_inv * omega_ij; /* This is 0 or negative */
 
   /* Signal velocity */
-  const float v_sig = ci + cj - 3.f * mu_ij;
+  const float v_sig = ci + cj - const_viscosity_beta * mu_ij;
 
   /* Now construct the full viscosity term */
   const float rho_ij = 0.5f * (rhoi + rhoj);
-  const float visc = -0.25f * const_viscosity_alpha * v_sig * mu_ij *
-                     (balsara_i + balsara_j) / rho_ij;
+  const float visc = -0.25f * v_sig * mu_ij * (balsara_i + balsara_j) / rho_ij;
 
   /* Now, convolve with the kernel */
   const float visc_term = 0.5f * visc * (wi_dr + wj_dr);
@@ -404,4 +402,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
   pi->entropy_dt += mj * visc_term * r_inv * dvdr;
 }
 
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->force.v_sig > const_limiter_max_v_sig_ratio * pj->force.v_sig) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
+
 #endif /* SWIFT_PRESSURE_ENTROPY_HYDRO_IACT_H */
diff --git a/src/hydro/PressureEntropy/hydro_io.h b/src/hydro/PressureEntropy/hydro_io.h
index 8c11bf6e334e18b10217e90f6573a42e40880955..e9397bf6108b8bc16658157e424055274f05f23c 100644
--- a/src/hydro/PressureEntropy/hydro_io.h
+++ b/src/hydro/PressureEntropy/hydro_io.h
@@ -71,7 +71,7 @@ INLINE static void hydro_read_particles(struct part* parts,
 INLINE static void convert_u(const struct engine* e, const struct part* p,
                              const struct xpart* xp, float* ret) {
 
-  ret[0] = hydro_get_comoving_internal_energy(p);
+  ret[0] = hydro_get_comoving_internal_energy(p, xp);
 }
 
 INLINE static void convert_P(const struct engine* e, const struct part* p,
@@ -194,8 +194,6 @@ INLINE static void hydro_write_flavour(hid_t h_grpsph) {
   io_write_attribute_s(
       h_grpsph, "Viscosity Model",
       "as in Springel (2005), i.e. Monaghan (1992) with Balsara (1995) switch");
-  io_write_attribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha);
-  io_write_attribute_f(h_grpsph, "Viscosity beta", 3.f);
 
   /* Time integration properties */
   io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt",
diff --git a/src/hydro/PressureEntropy/hydro_part.h b/src/hydro/PressureEntropy/hydro_part.h
index fb8424d66196b7013866acef6bec6ec9889a3353..6cf5a88a167c529ad81737f1e206ba475f6bbc0e 100644
--- a/src/hydro/PressureEntropy/hydro_part.h
+++ b/src/hydro/PressureEntropy/hydro_part.h
@@ -32,6 +32,8 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "star_formation_struct.h"
+#include "tracers_struct.h"
 
 /* Extra particle data not needed during the SPH loops over neighbours. */
 struct xpart {
@@ -54,6 +56,12 @@ struct xpart {
   /*! Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /*! Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Additional data used by the star formation */
+  struct star_formation_xpart_data sf_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /* Data of a single particle. */
@@ -148,6 +156,9 @@ struct part {
   /* Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro/Shadowswift/hydro.h b/src/hydro/Shadowswift/hydro.h
index cca2a241866fc797055922a48c25cebd6fa1b140..b0f3207dfce69ca79899b1134740d035d47251d1 100644
--- a/src/hydro/Shadowswift/hydro.h
+++ b/src/hydro/Shadowswift/hydro.h
@@ -103,6 +103,9 @@ __attribute__((always_inline)) INLINE static void hydro_first_init_part(
 
   const float mass = p->conserved.mass;
 
+  p->time_bin = 0;
+  p->wakeup = time_bin_not_awake;
+
   p->primitives.v[0] = p->v[0];
   p->primitives.v[1] = p->v[1];
   p->primitives.v[2] = p->v[2];
@@ -283,22 +286,26 @@ __attribute__((always_inline)) INLINE static void hydro_part_has_no_neighbours(
 }
 
 /**
- * @brief Prepare a particle for the gradient calculation.
+ * @brief Prepare a particle for the force calculation.
  *
- * The name of this method is confusing, as this method is really called after
- * the density loop and before the gradient loop.
+ * This function is called in the ghost task to convert some quantities coming
+ * from the density loop over neighbours into quantities ready to be used in the
+ * force loop over neighbours. Quantities are typically read from the density
+ * sub-structure and written to the force sub-structure.
+ * Examples of calculations done here include the calculation of viscosity term
+ * constants, thermal conduction terms, hydro conversions, etc.
  *
- * We use it to set the physical timestep for the particle and to copy the
- * actual velocities, which we need to boost our interfaces during the flux
- * calculation. We also initialize the variables used for the time step
- * calculation.
- *
- * @param p The particle to act upon.
- * @param xp The extended particle data to act upon.
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cosmo The current cosmological model.
+ * @param hydro_props Hydrodynamic properties.
+ * @param dt_alpha The time-step used to evolve non-cosmological quantities such
+ *                 as the artificial viscosity.
  */
 __attribute__((always_inline)) INLINE static void hydro_prepare_force(
     struct part* restrict p, struct xpart* restrict xp,
-    const struct cosmology* cosmo) {
+    const struct cosmology* cosmo, const struct hydro_props* hydro_props,
+    const float dt_alpha) {
 
   /* Initialize time step criterion variables */
   p->timestepvars.vmax = 0.0f;
@@ -411,7 +418,8 @@ __attribute__((always_inline)) INLINE static void hydro_reset_predicted_values(
  * @param xp The extended particle data to act upon.
  */
 __attribute__((always_inline)) INLINE static void hydro_convert_quantities(
-    struct part* p, struct xpart* xp, const struct cosmology* cosmo) {}
+    struct part* p, struct xpart* xp, const struct cosmology* cosmo,
+    const struct hydro_props* hydro_props) {}
 
 /**
  * @brief Extra operations to be done during the drift
@@ -842,4 +850,14 @@ __attribute__((always_inline)) INLINE static float hydro_get_physical_density(
   return cosmo->a3_inv * p->primitives.rho;
 }
 
+/**
+ * @brief Operations performed when a particle gets removed from the
+ * simulation volume.
+ *
+ * @param p The particle.
+ * @param xp The extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void hydro_remove_part(
+    const struct part* p, const struct xpart* xp) {}
+
 #endif /* SWIFT_SHADOWSWIFT_HYDRO_H */
diff --git a/src/hydro/Shadowswift/hydro_debug.h b/src/hydro/Shadowswift/hydro_debug.h
index 7cd7f89c8112ebcf1930c5ca52cb389139191975..8ff85d62fc7d58d53220b1f77a7afb44c00c33b0 100644
--- a/src/hydro/Shadowswift/hydro_debug.h
+++ b/src/hydro/Shadowswift/hydro_debug.h
@@ -23,6 +23,8 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "x=[%.16e,%.16e,%.16e], "
       "v=[%.3e,%.3e,%.3e], "
       "a=[%.3e,%.3e,%.3e], "
+      "time_bin=%d, "
+      "wakeup=%d, "
       "h=%.3e, "
       "primitives={"
       "v=[%.3e,%.3e,%.3e], "
@@ -47,9 +49,9 @@ __attribute__((always_inline)) INLINE static void hydro_debug_particle(
       "wcount_dh=%.3e, "
       "wcount=%.3e}",
       p->x[0], p->x[1], p->x[2], p->v[0], p->v[1], p->v[2], p->a_hydro[0],
-      p->a_hydro[1], p->a_hydro[2], p->h, p->primitives.v[0],
-      p->primitives.v[1], p->primitives.v[2], p->primitives.rho,
-      p->primitives.P, p->primitives.gradients.rho[0],
+      p->a_hydro[1], p->a_hydro[2], p->time_bin, p->wakeup, p->h,
+      p->primitives.v[0], p->primitives.v[1], p->primitives.v[2],
+      p->primitives.rho, p->primitives.P, p->primitives.gradients.rho[0],
       p->primitives.gradients.rho[1], p->primitives.gradients.rho[2],
       p->primitives.gradients.v[0][0], p->primitives.gradients.v[0][1],
       p->primitives.gradients.v[0][2], p->primitives.gradients.v[1][0],
diff --git a/src/hydro/Shadowswift/hydro_iact.h b/src/hydro/Shadowswift/hydro_iact.h
index eda8e3759d9e08dac8073ebed9fb36dd0c5b99f6..791e4c7924df9806fa9150d03c08a543771a7049 100644
--- a/src/hydro/Shadowswift/hydro_iact.h
+++ b/src/hydro/Shadowswift/hydro_iact.h
@@ -342,3 +342,28 @@ __attribute__((always_inline)) INLINE static void runner_iact_nonsym_force(
 
   runner_iact_fluxes_common(r2, dx, hi, hj, pi, pj, 0, a, H);
 }
+
+/**
+ * @brief Timestep limiter loop
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here if both particles are active */
+}
+
+/**
+ * @brief Timestep limiter loop (non-symmetric version)
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_nonsym_limiter(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Wake up the neighbour? */
+  if (pi->timestepvars.vmax >
+      const_limiter_max_v_sig_ratio * pj->timestepvars.vmax) {
+
+    pj->wakeup = time_bin_awake;
+  }
+}
diff --git a/src/hydro/Shadowswift/hydro_part.h b/src/hydro/Shadowswift/hydro_part.h
index a7cc9daf0839216f098ac05c2267adc60ea11fb0..d229b5c9d63ecfa855397f74f8a52a4117cefc03 100644
--- a/src/hydro/Shadowswift/hydro_part.h
+++ b/src/hydro/Shadowswift/hydro_part.h
@@ -21,6 +21,7 @@
 
 #include "chemistry_struct.h"
 #include "cooling_struct.h"
+#include "tracers_struct.h"
 #include "voronoi_cell.h"
 
 /* Extra particle data not needed during the computation. */
@@ -41,6 +42,9 @@ struct xpart {
   /* Additional data used to record cooling information */
   struct cooling_xpart_data cooling_data;
 
+  /* Additional data used by the tracers */
+  struct tracers_xpart_data tracers_data;
+
 } SWIFT_STRUCT_ALIGN;
 
 /* Data of a single particle. */
@@ -179,6 +183,9 @@ struct part {
   /* Time-step length */
   timebin_t time_bin;
 
+  /* Need waking-up ? */
+  timebin_t wakeup;
+
 #ifdef SWIFT_DEBUG_CHECKS
 
   /* Time of the last drift */
diff --git a/src/hydro_io.h b/src/hydro_io.h
index b6e0c36cc7415a1f628a109795aa98b4f583036c..60e5593cc0630ef4bc33ab407f6a669b7de1def1 100644
--- a/src/hydro_io.h
+++ b/src/hydro_io.h
@@ -31,6 +31,8 @@
 #include "./hydro/PressureEntropy/hydro_io.h"
 #elif defined(HOPKINS_PU_SPH)
 #include "./hydro/PressureEnergy/hydro_io.h"
+#elif defined(HOPKINS_PU_SPH_MONAGHAN)
+#include "./hydro/PressureEnergyMorrisMonaghanAV/hydro_io.h"
 #elif defined(DEFAULT_SPH)
 #include "./hydro/Default/hydro_io.h"
 #elif defined(GIZMO_MFV_SPH)
@@ -41,6 +43,8 @@
 #include "./hydro/Shadowswift/hydro_io.h"
 #elif defined(PLANETARY_SPH)
 #include "./hydro/Planetary/hydro_io.h"
+#elif defined(ANARCHY_PU_SPH)
+#include "./hydro/AnarchyPU/hydro_io.h"
 #else
 #error "Invalid choice of SPH variant"
 #endif
diff --git a/src/hydro_properties.c b/src/hydro_properties.c
index 9520781be45f7d9b59534c57e542e0802759aaec..f14c88bfb5128c1da17590f50698e5d038734b71 100644
--- a/src/hydro_properties.c
+++ b/src/hydro_properties.c
@@ -30,16 +30,45 @@
 #include "dimension.h"
 #include "equation_of_state.h"
 #include "error.h"
+#include "gravity_properties.h"
 #include "hydro.h"
 #include "kernel_hydro.h"
+#include "parser.h"
+#include "units.h"
 
 #define hydro_props_default_max_iterations 30
 #define hydro_props_default_volume_change 1.4f
 #define hydro_props_default_h_max FLT_MAX
+#define hydro_props_default_h_min_ratio 0.f
 #define hydro_props_default_h_tolerance 1e-4
 #define hydro_props_default_init_temp 0.f
 #define hydro_props_default_min_temp 0.f
-#define hydro_props_default_H_fraction 0.76
+#define hydro_props_default_H_ionization_temperature 1e4
+#define hydro_props_default_viscosity_alpha 0.8f
+
+#ifdef ANARCHY_PU_SPH
+/* This nasty #ifdef is only temporary until we separate the viscosity
+ * and hydro components. If it is not removed by July 2019, shout at JB. */
+#define hydro_props_default_viscosity_alpha_min \
+  0.01f /* values taken from Schaller+ 2015 */
+#define hydro_props_default_viscosity_alpha_max \
+  2.0f /* values taken from Schaller+ 2015 */
+#define hydro_props_default_viscosity_length \
+  0.01f /* values taken from Schaller+ 2015 */
+#else
+#define hydro_props_default_viscosity_alpha_min \
+  0.1f /* values taken from (Price, 2004), not used in legacy gadget mode */
+#define hydro_props_default_viscosity_alpha_max \
+  2.0f /* values taken from (Price, 2004), not used in legacy gadget mode */
+#define hydro_props_default_viscosity_length \
+  0.1f /* Values taken from (Price, 2004), not used in legacy gadget mode */
+#endif /* ANARCHY_PU_SPH */
+
+/* Following values taken directly from the ANARCHY paper (Schaller+ 2015) */
+#define hydro_props_default_diffusion_alpha 0.0f
+#define hydro_props_default_diffusion_beta 0.01f
+#define hydro_props_default_diffusion_alpha_max 1.0f
+#define hydro_props_default_diffusion_alpha_min 0.0f
 
 /**
  * @brief Initialize the global properties of the hydro scheme.
@@ -79,10 +108,20 @@ void hydro_props_init(struct hydro_props *p,
   p->h_max = parser_get_opt_param_float(params, "SPH:h_max",
                                         hydro_props_default_h_max);
 
+  /* Minimal smoothing length ratio to softening */
+  p->h_min_ratio = parser_get_opt_param_float(params, "SPH:h_min_ratio",
+                                              hydro_props_default_h_min_ratio);
+
+  /* Temporarily set the minimal softening to 0. */
+  p->h_min = 0.f;
+
   /* Number of iterations to converge h */
   p->max_smoothing_iterations = parser_get_opt_param_int(
       params, "SPH:max_ghost_iterations", hydro_props_default_max_iterations);
 
+  if (p->max_smoothing_iterations <= 10)
+    error("The number of smoothing length iterations should be > 10");
+
   /* Time integration properties */
   p->CFL_condition = parser_get_param_float(params, "SPH:CFL_condition");
   const float max_volume_change = parser_get_opt_param_float(
@@ -101,9 +140,52 @@ void hydro_props_init(struct hydro_props *p,
       (p->initial_temperature < p->minimal_temperature))
     error("Initial temperature lower than minimal allowed temperature!");
 
+  /* Neutral to ionized Hydrogen transition temperature */
+  p->hydrogen_ionization_temperature =
+      parser_get_opt_param_double(params, "SPH:H_ionization_temperature",
+                                  hydro_props_default_H_ionization_temperature);
+
   /* Hydrogen mass fraction */
+  const float default_H_fraction =
+      1. - phys_const->const_primordial_He_fraction;
   p->hydrogen_mass_fraction = parser_get_opt_param_double(
-      params, "SPH:H_mass_fraction", hydro_props_default_H_fraction);
+      params, "SPH:H_mass_fraction", default_H_fraction);
+
+  /* Mean molecular mass for neutral gas */
+  p->mu_neutral = 4. / (1. + 3. * p->hydrogen_mass_fraction);
+
+  /* Mean molecular mass for fully ionised gas */
+  p->mu_ionised = 4. / (8. - 5. * (1. - p->hydrogen_mass_fraction));
+
+  /* Read the artificial viscosity parameters from the file, if they exist */
+  p->viscosity.alpha = parser_get_opt_param_float(
+      params, "SPH:viscosity_alpha", hydro_props_default_viscosity_alpha);
+
+  p->viscosity.alpha_max =
+      parser_get_opt_param_float(params, "SPH:viscosity_alpha_max",
+                                 hydro_props_default_viscosity_alpha_max);
+
+  p->viscosity.alpha_min =
+      parser_get_opt_param_float(params, "SPH:viscosity_alpha_min",
+                                 hydro_props_default_viscosity_alpha_min);
+
+  p->viscosity.length = parser_get_opt_param_float(
+      params, "SPH:viscosity_length", hydro_props_default_viscosity_length);
+
+  /* Same for the thermal diffusion parameters */
+  p->diffusion.alpha = parser_get_opt_param_float(
+      params, "SPH:diffusion_alpha", hydro_props_default_diffusion_alpha);
+
+  p->diffusion.beta = parser_get_opt_param_float(
+      params, "SPH:diffusion_beta", hydro_props_default_diffusion_beta);
+
+  p->diffusion.alpha_max =
+      parser_get_opt_param_float(params, "SPH:diffusion_alpha_max",
+                                 hydro_props_default_diffusion_alpha_max);
+
+  p->diffusion.alpha_min =
+      parser_get_opt_param_float(params, "SPH:diffusion_alpha_min",
+                                 hydro_props_default_diffusion_alpha_min);
 
   /* Compute the initial energy (Note the temp. read is in internal units) */
   /* u_init = k_B T_init / (mu m_p (gamma - 1)) */
@@ -113,9 +195,7 @@ void hydro_props_init(struct hydro_props *p,
 
   /* Correct for hydrogen mass fraction (mu) */
   double mean_molecular_weight;
-  if (p->initial_temperature *
-          units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE) >
-      1e4)
+  if (p->initial_temperature > p->hydrogen_ionization_temperature)
     mean_molecular_weight = 4. / (8. - 5. * (1. - p->hydrogen_mass_fraction));
   else
     mean_molecular_weight = 4. / (1. + 3. * p->hydrogen_mass_fraction);
@@ -129,9 +209,7 @@ void hydro_props_init(struct hydro_props *p,
   u_min *= hydro_one_over_gamma_minus_one;
 
   /* Correct for hydrogen mass fraction (mu) */
-  if (p->minimal_temperature *
-          units_cgs_conversion_factor(us, UNIT_CONV_TEMPERATURE) >
-      1e4)
+  if (p->minimal_temperature > p->hydrogen_ionization_temperature)
     mean_molecular_weight = 4. / (8. - 5. * (1. - p->hydrogen_mass_fraction));
   else
     mean_molecular_weight = 4. / (1. + 3. * p->hydrogen_mass_fraction);
@@ -161,6 +239,12 @@ void hydro_props_print(const struct hydro_props *p) {
 
   message("Hydrodynamic integration: CFL parameter: %.4f.", p->CFL_condition);
 
+  message(
+      "Artificial viscosity parameters set to alpha: %.3f, max: %.3f, "
+      "min: %.3f, length: %.3f.",
+      p->viscosity.alpha, p->viscosity.alpha_max, p->viscosity.alpha_min,
+      p->viscosity.length);
+
   message(
       "Hydrodynamic integration: Max change of volume: %.2f "
       "(max|dlog(h)/dt|=%f).",
@@ -180,13 +264,14 @@ void hydro_props_print(const struct hydro_props *p) {
     message("Minimal gas temperature set to %f", p->minimal_temperature);
 
     // Matthieu: Temporary location for this i/o business.
+
 #ifdef PLANETARY_SPH
-#ifdef PLANETARY_SPH_BALSARA
-  message("Planetary SPH: Balsara switch enabled");
+#ifdef PLANETARY_SPH_NO_BALSARA
+  message("Planetary SPH: Balsara switch DISABLED");
 #else
-  message("Planetary SPH: Balsara switch disabled");
-#endif  // PLANETARY_SPH_BALSARA
-#endif  // PLANETARY_SPH
+  message("Planetary SPH: Balsara switch ENABLED");
+#endif
+#endif
 }
 
 #if defined(HAVE_HDF5)
@@ -201,7 +286,8 @@ void hydro_props_print_snapshot(hid_t h_grpsph, const struct hydro_props *p) {
   io_write_attribute_f(h_grpsph, "Kernel delta N_ngb", p->delta_neighbours);
   io_write_attribute_f(h_grpsph, "Kernel eta", p->eta_neighbours);
   io_write_attribute_f(h_grpsph, "Smoothing length tolerance", p->h_tolerance);
-  io_write_attribute_f(h_grpsph, "Maximal smoothing length", p->h_max);
+  io_write_attribute_f(h_grpsph, "Maximal smoothing length [internal units]",
+                       p->h_max);
   io_write_attribute_f(h_grpsph, "CFL parameter", p->CFL_condition);
   io_write_attribute_f(h_grpsph, "Volume log(max(delta h))",
                        p->log_max_h_change);
@@ -210,14 +296,90 @@ void hydro_props_print_snapshot(hid_t h_grpsph, const struct hydro_props *p) {
   io_write_attribute_i(h_grpsph, "Max ghost iterations",
                        p->max_smoothing_iterations);
   io_write_attribute_f(h_grpsph, "Minimal temperature", p->minimal_temperature);
+  io_write_attribute_f(h_grpsph,
+                       "Minimal energy per unit mass [internal units]",
+                       p->minimal_internal_energy);
   io_write_attribute_f(h_grpsph, "Initial temperature", p->initial_temperature);
-  io_write_attribute_f(h_grpsph, "Initial energy per unit mass",
+  io_write_attribute_f(h_grpsph,
+                       "Initial energy per unit mass [internal units]",
                        p->initial_internal_energy);
   io_write_attribute_f(h_grpsph, "Hydrogen mass fraction",
                        p->hydrogen_mass_fraction);
+  io_write_attribute_f(h_grpsph, "Hydrogen ionization transition temperature",
+                       p->hydrogen_ionization_temperature);
+  io_write_attribute_f(h_grpsph, "Alpha viscosity", p->viscosity.alpha);
+  io_write_attribute_f(h_grpsph, "Alpha viscosity (max)",
+                       p->viscosity.alpha_max);
+  io_write_attribute_f(h_grpsph, "Alpha viscosity (min)",
+                       p->viscosity.alpha_min);
+  io_write_attribute_f(h_grpsph, "Viscosity decay length [internal units]",
+                       p->viscosity.length);
+  io_write_attribute_f(h_grpsph, "Beta viscosity", const_viscosity_beta);
+  io_write_attribute_f(h_grpsph, "Max v_sig ratio (limiter)",
+                       const_limiter_max_v_sig_ratio);
 }
 #endif
 
+/**
+ * @brief Initialises a hydro_props struct with somewhat useful values for
+ *        the automated test suite. This is not intended for production use,
+ *        but rather to fill for the purposes of mocking.
+ *
+ * @param p the struct
+ */
+void hydro_props_init_no_hydro(struct hydro_props *p) {
+
+  p->eta_neighbours = 1.2348;
+  p->h_tolerance = hydro_props_default_h_tolerance;
+  p->target_neighbours = pow_dimension(p->eta_neighbours) * kernel_norm;
+  const float delta_eta = p->eta_neighbours * (1.f + p->h_tolerance);
+  p->delta_neighbours =
+      (pow_dimension(delta_eta) - pow_dimension(p->eta_neighbours)) *
+      kernel_norm;
+  p->h_max = hydro_props_default_h_max;
+  p->h_min = 0.f;
+  p->h_min_ratio = hydro_props_default_h_min_ratio;
+  p->max_smoothing_iterations = hydro_props_default_max_iterations;
+  p->CFL_condition = 0.1;
+  p->log_max_h_change = logf(powf(1.4, hydro_dimension_inv));
+
+  /* These values are inconsistent and in a production run would probably lead
+     to a crash. Again, this function is intended for mocking use in unit tests
+     and is _not_ to be used otherwise! */
+  p->minimal_temperature = hydro_props_default_min_temp;
+  p->minimal_internal_energy = 0.f;
+  p->initial_temperature = hydro_props_default_init_temp;
+  p->initial_internal_energy = 0.f;
+
+  p->hydrogen_mass_fraction = 0.755;
+  p->hydrogen_ionization_temperature =
+      hydro_props_default_H_ionization_temperature;
+
+  p->viscosity.alpha = hydro_props_default_viscosity_alpha;
+  p->viscosity.alpha_max = hydro_props_default_viscosity_alpha_max;
+  p->viscosity.alpha_min = hydro_props_default_viscosity_alpha_min;
+  p->viscosity.length = hydro_props_default_viscosity_length;
+
+  p->diffusion.alpha = hydro_props_default_diffusion_alpha;
+  p->diffusion.beta = hydro_props_default_diffusion_beta;
+  p->diffusion.alpha_max = hydro_props_default_diffusion_alpha_max;
+  p->diffusion.alpha_min = hydro_props_default_diffusion_alpha_min;
+}
+
+/**
+ * @brief Update the global properties of the hydro scheme for that time-step.
+ *
+ * @param p The properties to update.
+ * @param gp The properties of the gravity scheme.
+ * @param cosmo The cosmological model.
+ */
+void hydro_props_update(struct hydro_props *p, const struct gravity_props *gp,
+                        const struct cosmology *cosmo) {
+
+  /* Update the minimal allowed smoothing length */
+  p->h_min = p->h_min_ratio * gp->epsilon_cur;
+}
+
 /**
  * @brief Write a hydro_props struct to the given FILE as a stream of bytes.
  *
diff --git a/src/hydro_properties.h b/src/hydro_properties.h
index 64a840692db677704b8617e962d7883505983cc0..afc8a4b87aeb39f6bff1e61ae39edf391c856b1b 100644
--- a/src/hydro_properties.h
+++ b/src/hydro_properties.h
@@ -32,10 +32,14 @@
 #endif
 
 /* Local includes. */
-#include "parser.h"
-#include "physical_constants.h"
 #include "restart.h"
-#include "units.h"
+
+/* Forward declarations */
+struct cosmology;
+struct swift_params;
+struct gravity_props;
+struct phys_const;
+struct unit_system;
 
 /**
  * @brief Contains all the constants and parameters of the hydro scheme
@@ -57,6 +61,12 @@ struct hydro_props {
   /*! Maximal smoothing length */
   float h_max;
 
+  /*! Minimal smoothing length expressed as ratio to softening length */
+  float h_min_ratio;
+
+  /*! Minimal smoothing length */
+  float h_min;
+
   /*! Maximal number of iterations to converge h */
   int max_smoothing_iterations;
 
@@ -69,17 +79,60 @@ struct hydro_props {
   /*! Minimal temperature allowed */
   float minimal_temperature;
 
-  /*! Minimal internal energy per unit mass */
+  /*! Minimal physical internal energy per unit mass */
   float minimal_internal_energy;
 
   /*! Initial temperature */
   float initial_temperature;
 
-  /*! Initial internal energy per unit mass */
+  /*! Initial physical internal energy per unit mass */
   float initial_internal_energy;
 
-  /*! Primoridal hydrogen mass fraction for initial energy conversion */
+  /*! Primordial hydrogen mass fraction for initial energy conversion */
   float hydrogen_mass_fraction;
+
+  /*! Temperature of the neutral to ionized transition of Hydrogen */
+  float hydrogen_ionization_temperature;
+
+  /*! Mean molecular weight below hydrogen ionization temperature */
+  float mu_neutral;
+
+  /*! Mean molecular weight above hydrogen ionization temperature */
+  float mu_ionised;
+
+  /*! Artificial viscosity parameters */
+  struct {
+    /*! For the fixed, simple case. Also used to set the initial AV
+        coefficient for variable schemes. */
+    float alpha;
+
+    /*! Artificial viscosity (max) for the variable case (e.g. M&M) */
+    float alpha_max;
+
+    /*! Artificial viscosity (min) for the variable case (e.g. M&M) */
+    float alpha_min;
+
+    /*! The decay length of the artificial viscosity (used in M&M, etc.) */
+    float length;
+  } viscosity;
+
+  /*! Thermal diffusion parameters */
+  struct {
+
+    /*! Initialisation value, or the case for constant thermal diffusion coeffs
+     */
+    float alpha;
+
+    /*! Tuning parameter for speed of ramp up/down */
+    float beta;
+
+    /*! Maximal value for alpha_diff */
+    float alpha_max;
+
+    /*! Minimal value for alpha_diff */
+    float alpha_min;
+
+  } diffusion;
 };
 
 void hydro_props_print(const struct hydro_props *p);
@@ -88,6 +141,9 @@ void hydro_props_init(struct hydro_props *p,
                       const struct unit_system *us,
                       struct swift_params *params);
 
+void hydro_props_update(struct hydro_props *p, const struct gravity_props *gp,
+                        const struct cosmology *cosmo);
+
 #if defined(HAVE_HDF5)
 void hydro_props_print_snapshot(hid_t h_grpsph, const struct hydro_props *p);
 #endif
@@ -96,4 +152,7 @@ void hydro_props_print_snapshot(hid_t h_grpsph, const struct hydro_props *p);
 void hydro_props_struct_dump(const struct hydro_props *p, FILE *stream);
 void hydro_props_struct_restore(const struct hydro_props *p, FILE *stream);
 
+/* Setup for tests */
+void hydro_props_init_no_hydro(struct hydro_props *p);
+
 #endif /* SWIFT_HYDRO_PROPERTIES */
diff --git a/src/intrinsics.h b/src/intrinsics.h
index 7a4f0870b9d758ed6613e88b6b48a3c93887cd1c..7e3b9108248ddd43303a9103394d818384a9b664 100644
--- a/src/intrinsics.h
+++ b/src/intrinsics.h
@@ -32,7 +32,7 @@
  * This is a wrapper for the GNU intrinsic with an implementation (from
  * Hacker's Delight) if the compiler intrinsics are not available.
  */
-__attribute__((always_inline)) INLINE static int intrinsics_clz(
+__attribute__((always_inline, const)) INLINE static int intrinsics_clz(
     unsigned int x) {
 
 #ifdef __GNUC__
@@ -70,9 +70,10 @@ __attribute__((always_inline)) INLINE static int intrinsics_clz(
  * @brief Returns the number of leading 0-bits in x, starting at the most
  * significant bit position. If x is 0, the result is undefined.
  *
- * This is a wrapper for the GNU intrinsic with an implementation.
+ * This is a wrapper for the GNU intrinsic with a place-holder for a future
+ * version in cases where the compiler intrinsic is not available.
  */
-__attribute__((always_inline)) INLINE static int intrinsics_clzll(
+__attribute__((always_inline, const)) INLINE static int intrinsics_clzll(
     unsigned long long x) {
 
 #ifdef __GNUC__
@@ -89,7 +90,7 @@ __attribute__((always_inline)) INLINE static int intrinsics_clzll(
  * This is a wrapper for the GNU intrinsic with an implementation (from
  * Hacker's Delight) if the compiler intrinsics are not available.
  */
-__attribute__((always_inline)) INLINE static int intrinsics_popcount(
+__attribute__((always_inline, const)) INLINE static int intrinsics_popcount(
     unsigned int x) {
 
 #ifdef __GNUC__
@@ -111,7 +112,7 @@ __attribute__((always_inline)) INLINE static int intrinsics_popcount(
  * This is a wrapper for the GNU intrinsic with an implementation (from
  * Hacker's Delight) if the compiler intrinsics are not available.
  */
-__attribute__((always_inline)) INLINE static int intrinsics_popcountll(
+__attribute__((always_inline, const)) INLINE static int intrinsics_popcountll(
     unsigned long long x) {
 
 #ifdef __GNUC__
diff --git a/src/io_properties.h b/src/io_properties.h
index 037d32338f015975489f6cbca4f7dfafac413e5f..c45edb2641e374e2cfaec6c3251aff7d18f361d6 100644
--- a/src/io_properties.h
+++ b/src/io_properties.h
@@ -43,10 +43,23 @@ typedef void (*conversion_func_part_float)(const struct engine*,
 typedef void (*conversion_func_part_double)(const struct engine*,
                                             const struct part*,
                                             const struct xpart*, double*);
+typedef void (*conversion_func_part_long_long)(const struct engine*,
+                                               const struct part*,
+                                               const struct xpart*, long long*);
 typedef void (*conversion_func_gpart_float)(const struct engine*,
                                             const struct gpart*, float*);
 typedef void (*conversion_func_gpart_double)(const struct engine*,
                                              const struct gpart*, double*);
+typedef void (*conversion_func_gpart_long_long)(const struct engine*,
+                                                const struct gpart*,
+                                                long long*);
+typedef void (*conversion_func_spart_float)(const struct engine*,
+                                            const struct spart*, float*);
+typedef void (*conversion_func_spart_double)(const struct engine*,
+                                             const struct spart*, double*);
+typedef void (*conversion_func_spart_long_long)(const struct engine*,
+                                                const struct spart*,
+                                                long long*);
 
 /**
  * @brief The properties of a given dataset for i/o
@@ -75,6 +88,7 @@ struct io_props {
   char* start_temp_c;
   float* start_temp_f;
   double* start_temp_d;
+  long long* start_temp_l;
 
   /* Pointer to the engine */
   const struct engine* e;
@@ -86,6 +100,7 @@ struct io_props {
   const struct part* parts;
   const struct xpart* xparts;
   const struct gpart* gparts;
+  const struct spart* sparts;
 
   /* Are we converting? */
   int conversion;
@@ -93,10 +108,17 @@ struct io_props {
   /* Conversion function for part */
   conversion_func_part_float convert_part_f;
   conversion_func_part_double convert_part_d;
+  conversion_func_part_long_long convert_part_l;
 
   /* Conversion function for gpart */
   conversion_func_gpart_float convert_gpart_f;
   conversion_func_gpart_double convert_gpart_d;
+  conversion_func_gpart_long_long convert_gpart_l;
+
+  /* Conversion function for spart */
+  conversion_func_spart_float convert_spart_f;
+  conversion_func_spart_double convert_spart_d;
+  conversion_func_spart_long_long convert_spart_l;
 };
 
 /**
@@ -134,11 +156,17 @@ INLINE static struct io_props io_make_input_field_(
   r.parts = NULL;
   r.xparts = NULL;
   r.gparts = NULL;
+  r.sparts = NULL;
   r.conversion = 0;
   r.convert_part_f = NULL;
   r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
   r.convert_gpart_f = NULL;
   r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
 
   return r;
 }
@@ -175,11 +203,17 @@ INLINE static struct io_props io_make_output_field_(
   r.partSize = partSize;
   r.parts = NULL;
   r.gparts = NULL;
+  r.sparts = NULL;
   r.conversion = 0;
   r.convert_part_f = NULL;
   r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
   r.convert_gpart_f = NULL;
   r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
 
   return r;
 }
@@ -223,11 +257,17 @@ INLINE static struct io_props io_make_output_field_convert_part_FLOAT(
   r.parts = parts;
   r.xparts = xparts;
   r.gparts = NULL;
+  r.sparts = NULL;
   r.conversion = 1;
   r.convert_part_f = functionPtr;
   r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
   r.convert_gpart_f = NULL;
   r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
 
   return r;
 }
@@ -242,7 +282,7 @@ INLINE static struct io_props io_make_output_field_convert_part_FLOAT(
  * @param partSize The size in byte of the particle
  * @param parts The particle array
  * @param xparts The xparticle array
- * @param functionPtr The function used to convert a particle to a float
+ * @param functionPtr The function used to convert a particle to a double
  *
  * Do not call this function directly. Use the macro defined above.
  */
@@ -263,11 +303,63 @@ INLINE static struct io_props io_make_output_field_convert_part_DOUBLE(
   r.parts = parts;
   r.xparts = xparts;
   r.gparts = NULL;
+  r.sparts = NULL;
   r.conversion = 1;
   r.convert_part_f = NULL;
   r.convert_part_d = functionPtr;
+  r.convert_part_l = NULL;
+  r.convert_gpart_f = NULL;
+  r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
+
+  return r;
+}
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param partSize The size in byte of the particle
+ * @param parts The particle array
+ * @param xparts The xparticle array
+ * @param functionPtr The function used to convert a particle to a long long
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+INLINE static struct io_props io_make_output_field_convert_part_LONGLONG(
+    const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension,
+    enum unit_conversion_factor units, size_t partSize,
+    const struct part* parts, const struct xpart* xparts,
+    conversion_func_part_long_long functionPtr) {
+
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = UNUSED;
+  r.units = units;
+  r.field = NULL;
+  r.partSize = partSize;
+  r.parts = parts;
+  r.xparts = xparts;
+  r.gparts = NULL;
+  r.sparts = NULL;
+  r.conversion = 1;
+  r.convert_part_f = NULL;
+  r.convert_part_d = NULL;
+  r.convert_part_l = functionPtr;
   r.convert_gpart_f = NULL;
   r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
 
   return r;
 }
@@ -309,11 +401,17 @@ INLINE static struct io_props io_make_output_field_convert_gpart_FLOAT(
   r.parts = NULL;
   r.xparts = NULL;
   r.gparts = gparts;
+  r.sparts = NULL;
   r.conversion = 1;
   r.convert_part_f = NULL;
   r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
   r.convert_gpart_f = functionPtr;
   r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
 
   return r;
 }
@@ -327,7 +425,7 @@ INLINE static struct io_props io_make_output_field_convert_gpart_FLOAT(
  * @param units The units of the dataset
  * @param gpartSize The size in byte of the particle
  * @param gparts The particle array
- * @param functionPtr The function used to convert a g-particle to a float
+ * @param functionPtr The function used to convert a g-particle to a double
  *
  * Do not call this function directly. Use the macro defined above.
  */
@@ -347,11 +445,201 @@ INLINE static struct io_props io_make_output_field_convert_gpart_DOUBLE(
   r.parts = NULL;
   r.xparts = NULL;
   r.gparts = gparts;
+  r.sparts = NULL;
   r.conversion = 1;
   r.convert_part_f = NULL;
   r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
   r.convert_gpart_f = NULL;
   r.convert_gpart_d = functionPtr;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
+
+  return r;
+}
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param gpartSize The size in byte of the particle
+ * @param gparts The particle array
+ * @param functionPtr The function used to convert a g-particle to a long long
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+INLINE static struct io_props io_make_output_field_convert_gpart_LONGLONG(
+    const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension,
+    enum unit_conversion_factor units, size_t gpartSize,
+    const struct gpart* gparts, conversion_func_gpart_long_long functionPtr) {
+
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = UNUSED;
+  r.units = units;
+  r.field = NULL;
+  r.partSize = gpartSize;
+  r.parts = NULL;
+  r.xparts = NULL;
+  r.gparts = gparts;
+  r.sparts = NULL;
+  r.conversion = 1;
+  r.convert_part_f = NULL;
+  r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
+  r.convert_gpart_f = NULL;
+  r.convert_gpart_d = NULL;
+  r.convert_gpart_l = functionPtr;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
+
+  return r;
+}
+
+/**
+ * @brief Constructs an #io_props (with conversion) from its parameters
+ */
+#define io_make_output_field_convert_spart(name, type, dim, units, spart, \
+                                           convert)                       \
+  io_make_output_field_convert_spart_##type(name, type, dim, units,       \
+                                            sizeof(spart[0]), spart, convert)
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param spartSize The size in byte of the particle
+ * @param sparts The particle array
+ * @param functionPtr The function used to convert an s-particle to a float
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+INLINE static struct io_props io_make_output_field_convert_spart_FLOAT(
+    const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension,
+    enum unit_conversion_factor units, size_t spartSize,
+    const struct spart* sparts, conversion_func_spart_float functionPtr) {
+
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = UNUSED;
+  r.units = units;
+  r.field = NULL;
+  r.partSize = spartSize;
+  r.parts = NULL;
+  r.xparts = NULL;
+  r.gparts = NULL;
+  r.sparts = sparts;
+  r.conversion = 1;
+  r.convert_part_f = NULL;
+  r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
+  r.convert_gpart_f = NULL;
+  r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = functionPtr;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = NULL;
+
+  return r;
+}
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param spartSize The size in byte of the particle
+ * @param sparts The particle array
+ * @param functionPtr The function used to convert an s-particle to a double
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+INLINE static struct io_props io_make_output_field_convert_spart_DOUBLE(
+    const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension,
+    enum unit_conversion_factor units, size_t spartSize,
+    const struct spart* sparts, conversion_func_spart_double functionPtr) {
+
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = UNUSED;
+  r.units = units;
+  r.field = NULL;
+  r.partSize = spartSize;
+  r.parts = NULL;
+  r.xparts = NULL;
+  r.gparts = NULL;
+  r.sparts = sparts;
+  r.conversion = 1;
+  r.convert_part_f = NULL;
+  r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
+  r.convert_gpart_f = NULL;
+  r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = functionPtr;
+  r.convert_spart_l = NULL;
+
+  return r;
+}
+
+/**
+ * @brief Construct an #io_props from its parameters
+ *
+ * @param name Name of the field to read
+ * @param type The type of the data
+ * @param dimension Dataset dimension (1D, 3D, ...)
+ * @param units The units of the dataset
+ * @param spartSize The size in byte of the particle
+ * @param sparts The particle array
+ * @param functionPtr The function used to convert an s-particle to a long long
+ *
+ * Do not call this function directly. Use the macro defined above.
+ */
+INLINE static struct io_props io_make_output_field_convert_spart_LONGLONG(
+    const char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension,
+    enum unit_conversion_factor units, size_t spartSize,
+    const struct spart* sparts, conversion_func_spart_long_long functionPtr) {
+
+  struct io_props r;
+  strcpy(r.name, name);
+  r.type = type;
+  r.dimension = dimension;
+  r.importance = UNUSED;
+  r.units = units;
+  r.field = NULL;
+  r.partSize = spartSize;
+  r.parts = NULL;
+  r.xparts = NULL;
+  r.gparts = NULL;
+  r.sparts = sparts;
+  r.conversion = 1;
+  r.convert_part_f = NULL;
+  r.convert_part_d = NULL;
+  r.convert_part_l = NULL;
+  r.convert_gpart_f = NULL;
+  r.convert_gpart_d = NULL;
+  r.convert_gpart_l = NULL;
+  r.convert_spart_f = NULL;
+  r.convert_spart_d = NULL;
+  r.convert_spart_l = functionPtr;
 
   return r;
 }
diff --git a/src/kernel_hydro.h b/src/kernel_hydro.h
index aac06c19ce39c647ba7211f85ac0a849365d126f..3d5ec6ac84a77941739f5d3b57ed0340c831c061 100644
--- a/src/kernel_hydro.h
+++ b/src/kernel_hydro.h
@@ -124,20 +124,27 @@ static const float kernel_coeffs[(kernel_degree + 1) * (kernel_ivals + 1)]
 #define kernel_name "Wendland C2"
 #define kernel_degree 5 /* Degree of the polynomial */
 #define kernel_ivals 1  /* Number of branches */
+#if defined(HYDRO_DIMENSION_1D)
+/* Wendland C* have different form in 1D than 2D/3D */
+#define kernel_gamma ((float)(1.620185))
+#define kernel_constant ((float)(5. / 4.))
+static const float kernel_coeffs[(kernel_degree + 1) * (kernel_ivals + 1)]
+    __attribute__((aligned(16))) = {
+        0.f, -3.f, 8.f, -6.f, 0.f, 1.f, /* 0 < u < 1 */
+        0.f, 0.f,  0.f, 0.f,  0.f, 0.f};
+#else
 #if defined(HYDRO_DIMENSION_3D)
 #define kernel_gamma ((float)(1.936492))
 #define kernel_constant ((float)(21. * M_1_PI / 2.))
 #elif defined(HYDRO_DIMENSION_2D)
 #define kernel_gamma ((float)(1.897367))
 #define kernel_constant ((float)(7. * M_1_PI))
-#elif defined(HYDRO_DIMENSION_1D)
-#error "Wendland C2 kernel not defined in 1D."
 #endif
 static const float kernel_coeffs[(kernel_degree + 1) * (kernel_ivals + 1)]
     __attribute__((aligned(16))) = {
         4.f, -15.f, 20.f, -10.f, 0.f, 1.f,  /* 0 < u < 1 */
         0.f, 0.f,   0.f,  0.f,   0.f, 0.f}; /* 1 < u */
-
+#endif
 /* ------------------------------------------------------------------------- */
 #elif defined(WENDLAND_C4_KERNEL)
 
diff --git a/src/kernel_long_gravity.h b/src/kernel_long_gravity.h
index 1744f2cd046a90499563a182ca68212e43f4a252..f6580f8f72b9eb6a2b49a4d2d54a0e4d0593fcbf 100644
--- a/src/kernel_long_gravity.h
+++ b/src/kernel_long_gravity.h
@@ -90,7 +90,7 @@ __attribute__((always_inline)) INLINE static void kernel_long_grav_derivatives(
   const float r_s_inv5 = r_s_inv4 * r_s_inv;
 
   /* Derivatives of \chi */
-  derivs->chi_0 = erfcf(u);
+  derivs->chi_0 = approx_erfcf(u);
   derivs->chi_1 = -r_s_inv;
   derivs->chi_2 = r_s_inv2 * u;
   derivs->chi_3 = -r_s_inv3 * (u2 - 0.5f);
@@ -158,7 +158,7 @@ __attribute__((always_inline)) INLINE static void kernel_long_grav_pot_eval(
 #ifdef GADGET2_LONG_RANGE_CORRECTION
 
   const float arg1 = u * 0.5f;
-  const float term1 = erfcf(arg1);
+  const float term1 = approx_erfcf(arg1);
 
   *W = term1;
 #else
@@ -190,7 +190,7 @@ __attribute__((always_inline)) INLINE static void kernel_long_grav_force_eval(
   const float arg1 = u * 0.5f;
   const float arg2 = -arg1 * arg1;
 
-  const float term1 = erfcf(arg1);
+  const float term1 = approx_erfcf(arg1);
   const float term2 = u * one_over_sqrt_pi * expf(arg2);
 
   *W = term1 + term2;
diff --git a/src/kick.h b/src/kick.h
index 50ecaea498bdd401cc0ac27525ed27986a344c59..f2085bf1f427cf5f15ed0e8791ad1923f0b22bed 100644
--- a/src/kick.h
+++ b/src/kick.h
@@ -45,8 +45,8 @@ __attribute__((always_inline)) INLINE static void kick_gpart(
   if (gp->ti_kick != ti_start)
     error(
         "g-particle has not been kicked to the current time gp->ti_kick=%lld, "
-        "ti_start=%lld, ti_end=%lld",
-        gp->ti_kick, ti_start, ti_end);
+        "ti_start=%lld, ti_end=%lld id=%lld",
+        gp->ti_kick, ti_start, ti_end, gp->id_or_neg_offset);
 
   gp->ti_kick = ti_end;
 #endif
@@ -70,7 +70,8 @@ __attribute__((always_inline)) INLINE static void kick_gpart(
  * @param dt_kick_therm The kick time-step for changes in thermal state.
  * @param dt_kick_corr The kick time-step for the gizmo-mfv gravity correction.
  * @param cosmo The cosmological model.
- * @param hydro_props The constants used in the scheme
+ * @param hydro_props The constants used in the scheme.
+ * @param entropy_floor_props Properties of the entropy floor.
  * @param ti_start The starting (integer) time of the kick (for debugging
  * checks).
  * @param ti_end The ending (integer) time of the kick (for debugging checks).
@@ -79,14 +80,15 @@ __attribute__((always_inline)) INLINE static void kick_part(
     struct part *restrict p, struct xpart *restrict xp, double dt_kick_hydro,
     double dt_kick_grav, double dt_kick_therm, double dt_kick_corr,
     const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct entropy_floor_properties *entropy_floor_props,
     integertime_t ti_start, integertime_t ti_end) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (p->ti_kick != ti_start)
     error(
         "particle has not been kicked to the current time p->ti_kick=%lld, "
-        "ti_start=%lld, ti_end=%lld",
-        p->ti_kick, ti_start, ti_end);
+        "ti_start=%lld, ti_end=%lld id=%lld time_bin=%d wakeup=%d",
+        p->ti_kick, ti_start, ti_end, p->id, p->time_bin, p->wakeup);
 
   p->ti_kick = ti_end;
 #endif
@@ -114,6 +116,13 @@ __attribute__((always_inline)) INLINE static void kick_part(
   hydro_kick_extra(p, xp, dt_kick_therm, dt_kick_grav, dt_kick_hydro,
                    dt_kick_corr, cosmo, hydro_props);
   if (p->gpart != NULL) gravity_kick_extra(p->gpart, dt_kick_grav);
+
+  /* Verify that the particle is not below the entropy floor */
+  const float floor = entropy_floor(p, cosmo, entropy_floor_props);
+  if (hydro_get_physical_entropy(p, xp, cosmo) < floor) {
+    hydro_set_physical_entropy(p, xp, cosmo, floor);
+    hydro_set_physical_internal_energy_dt(p, cosmo, 0.f);
+  }
 }
 
 /**
@@ -133,8 +142,8 @@ __attribute__((always_inline)) INLINE static void kick_spart(
   if (sp->ti_kick != ti_start)
     error(
         "s-particle has not been kicked to the current time sp->ti_kick=%lld, "
-        "ti_start=%lld, ti_end=%lld",
-        sp->ti_kick, ti_start, ti_end);
+        "ti_start=%lld, ti_end=%lld id=%lld",
+        sp->ti_kick, ti_start, ti_end, sp->id);
 
   sp->ti_kick = ti_end;
 #endif
@@ -150,7 +159,7 @@ __attribute__((always_inline)) INLINE static void kick_spart(
   sp->gpart->v_full[2] = sp->v[2];
 
   /* Kick extra variables */
-  star_kick_extra(sp, dt_kick_grav);
+  stars_kick_extra(sp, dt_kick_grav);
 }
 
 #endif /* SWIFT_KICK_H */
diff --git a/src/lock.h b/src/lock.h
index b2dd2eac9d0ca5d7807907e31cf3fa31894f9aed..39601b0c52e414dad1a507b406c54640a254df30 100644
--- a/src/lock.h
+++ b/src/lock.h
@@ -34,6 +34,7 @@
 #define lock_trylock(l) (pthread_spin_lock(l) != 0)
 #define lock_unlock(l) (pthread_spin_unlock(l) != 0)
 #define lock_unlock_blind(l) pthread_spin_unlock(l)
+#define lock_static_initializer ((pthread_spinlock_t)0)
 
 #elif defined(PTHREAD_LOCK)
 #include <pthread.h>
@@ -44,6 +45,7 @@
 #define lock_trylock(l) (pthread_mutex_trylock(l) != 0)
 #define lock_unlock(l) (pthread_mutex_unlock(l) != 0)
 #define lock_unlock_blind(l) pthread_mutex_unlock(l)
+#define lock_static_initializer PTHREAD_MUTEX_INITIALIZER
 
 #else
 #define swift_lock_type volatile int
@@ -52,12 +54,12 @@
 INLINE static int lock_lock(volatile int *l) {
   while (atomic_cas(l, 0, 1) != 0)
     ;
-  // while( *l );
   return 0;
 }
 #define lock_trylock(l) ((*(l)) ? 1 : atomic_cas(l, 0, 1))
 #define lock_unlock(l) (atomic_cas(l, 1, 0) != 1)
 #define lock_unlock_blind(l) atomic_cas(l, 1, 0)
+#define lock_static_initializer 0
 #endif
 
 #endif /* SWIFT_LOCK_H */
diff --git a/src/logger.c b/src/logger.c
index 5fd4145aa1b042ed806dd3fe5487d094600b66c4..8be521b27f949ea0d496a5207335f1ec68208489 100644
--- a/src/logger.c
+++ b/src/logger.c
@@ -21,8 +21,11 @@
 #include "../config.h"
 
 #ifdef HAVE_POSIX_FALLOCATE /* Are we on a sensible platform? */
+#ifdef WITH_LOGGER
 
 /* Some standard headers. */
+#include <hdf5.h>
+#include <math.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
@@ -33,8 +36,100 @@
 /* Local headers. */
 #include "atomic.h"
 #include "dump.h"
+#include "engine.h"
 #include "error.h"
 #include "part.h"
+#include "units.h"
+
+/*
+ * These are definitions from the file format and therefore should not be changed!
+ */
+/* number of bytes for a mask */
+// TODO change this to number of bits
+#define logger_mask_size 1
+
+/* number of bytes for the chunk header */
+#define logger_header_bytes 8
+
+/* number of bytes for an offset */
+#define logger_offset_size logger_header_bytes - logger_mask_size
+
+/* number of bytes for the version information */
+#define logger_version_size 20
+
+/* number of bytes for the labels in the header */
+#define logger_label_size 20
+
+/* number of bytes for the number in the header */
+#define logger_number_size 4
+
+char logger_version[logger_version_size] = "0.1";
+
+const struct mask_data logger_mask_data[logger_count_mask] = {
+    /* Particle's position */
+    {3 * sizeof(double), 1 << logger_x, "positions"},
+    /* Particle's velocity */
+    {3 * sizeof(float), 1 << logger_v, "velocities"},
+    /* Particle's acceleration */
+    {3 * sizeof(float), 1 << logger_a, "accelerations"},
+    /* Particle's entropy */
+    {sizeof(float), 1 << logger_u, "entropy"},
+    /* Particle's smoothing length */
+    {sizeof(float), 1 << logger_h, "smoothing length"},
+    /* Particle's density */
+    {sizeof(float), 1 << logger_rho, "density"},
+    /* Particle's constants: mass (float) and ID (long long) */
+    {sizeof(float) + sizeof(long long), 1 << logger_consts, "consts"},
+    /* Simulation time stamp: integertime and double time (e.g. scale
+       factor or time) */
+    {sizeof(integertime_t) + sizeof(double), 1 << logger_timestamp,
+     "timestamp"}};
+
+/**
+ * @brief Write the header of a chunk (offset + mask).
+ *
+ * NOTE: this may be broken on big-endian systems (untested).
+ *
+ * @param buff The writing buffer
+ * @param mask The mask to write
+ * @param offset The old offset
+ * @param offset_new The new offset
+ *
+ * @return updated buff
+ */
+char *logger_write_chunk_header(char *buff, const unsigned int *mask,
+                                const size_t *offset, const size_t offset_new) {
+  /* write mask */
+  memcpy(buff, mask, logger_mask_size);
+  buff += logger_mask_size;
+
+  /* write offset */
+  size_t diff_offset = offset_new - *offset;
+  memcpy(buff, &diff_offset, logger_offset_size);
+  buff += logger_offset_size;
+
+  return buff;
+}
+
+/**
+ * @brief Write to the dump
+ *
+ * @param d #dump file
+ * @param offset (return) offset of the data
+ * @param size number of bytes to write
+ * @param p pointer to the data
+ */
+void logger_write_data(struct dump *d, size_t *offset, size_t size,
+                       const void *p) {
+  /* get buffer */
+  char *buff = dump_get(d, size, offset);
+
+  /* write data to the buffer */
+  memcpy(buff, p, size);
+
+  /* Update offset to end of chunk */
+  *offset += size;
+}
 
 /**
  * @brief Compute the size of a message given its mask.
@@ -43,119 +138,136 @@
  *
  * @return The size of the logger message in bytes.
  */
-int logger_size(unsigned int mask) {
+int logger_compute_chunk_size(unsigned int mask) {
 
   /* Start with 8 bytes for the header. */
-  int size = 8;
+  int size = logger_mask_size + logger_offset_size;
 
   /* Is this a particle or a timestep? */
-  if (mask & logger_mask_timestamp) {
+  if (mask & logger_mask_data[logger_timestamp].mask) {
 
     /* The timestamp should not contain any other bits. */
-    if (mask != logger_mask_timestamp)
+    if (mask != logger_mask_data[logger_timestamp].mask)
       error("Timestamps should not include any other data.");
 
     /* A timestamp consists of an unsigned long long int. */
-    size += sizeof(unsigned long long int);
+    size += logger_mask_data[logger_timestamp].size;
 
   } else {
 
-    /* Particle position as three doubles. */
-    if (mask & logger_mask_x) size += 3 * sizeof(double);
-
-    /* Particle velocity as three floats. */
-    if (mask & logger_mask_v) size += 3 * sizeof(float);
-
-    /* Particle accelleration as three floats. */
-    if (mask & logger_mask_a) size += 3 * sizeof(float);
+    for (int i = 0; i < logger_count_mask; i++) {
+      if (mask & logger_mask_data[i].mask) {
+        size += logger_mask_data[i].size;
+      }
+    }
+  }
 
-    /* Particle internal energy as a single float. */
-    if (mask & logger_mask_u) size += sizeof(float);
+  return size;
+}
 
-    /* Particle smoothing length as a single float. */
-    if (mask & logger_mask_h) size += sizeof(float);
+/**
+ * @brief log all particles in the engine.
+ *
+ * @param log The #logger
+ * @param e The #engine
+ */
+void logger_log_all(struct logger *log, const struct engine *e) {
 
-    /* Particle density as a single float. */
-    if (mask & logger_mask_rho) size += sizeof(float);
+  /* Ensure that enough space is available */
+  logger_ensure_size(log, e->total_nr_parts, e->total_nr_gparts, 0);
+#ifdef SWIFT_DEBUG_CHECKS
+  message("Need to implement stars");
+#endif
 
-    /* Particle constants, which is a bit more complicated. */
-    if (mask & logger_mask_rho) {
-      size += sizeof(float) +     // mass
-              sizeof(long long);  // id
-    }
+  /* some constants */
+  const struct space *s = e->s;
+  const unsigned int mask =
+      logger_mask_data[logger_x].mask | logger_mask_data[logger_v].mask |
+      logger_mask_data[logger_a].mask | logger_mask_data[logger_u].mask |
+      logger_mask_data[logger_h].mask | logger_mask_data[logger_rho].mask |
+      logger_mask_data[logger_consts].mask;
+
+  /* loop over all parts */
+  for (long long i = 0; i < e->total_nr_parts; i++) {
+    logger_log_part(log, &s->parts[i], mask,
+                    &s->xparts[i].logger_data.last_offset);
+    s->xparts[i].logger_data.steps_since_last_output = 0;
   }
 
-  return size;
+  /* loop over all gparts */
+  if (e->total_nr_gparts > 0) error("Not implemented");
+
+  /* loop over all sparts */
+  // TODO
 }
 
 /**
  * @brief Dump a #part to the log.
  *
+ * @param log The #logger
  * @param p The #part to dump.
  * @param mask The mask of the data to dump.
- * @param offset Pointer to the offset of the previous log of this particle.
- * @param dump The #dump in which to log the particle data.
+ * @param offset Pointer to the offset of the previous log of this particle;
+ * (return) offset of this log.
  */
-void logger_log_part(struct part *p, unsigned int mask, size_t *offset,
-                     struct dump *dump) {
+void logger_log_part(struct logger *log, const struct part *p,
+                     unsigned int mask, size_t *offset) {
 
   /* Make sure we're not writing a timestamp. */
-  if (mask & logger_mask_timestamp)
+  if (mask & logger_mask_data[logger_timestamp].mask)
     error("You should not log particles as timestamps.");
 
   /* Start by computing the size of the message. */
-  const int size = logger_size(mask);
+  const int size = logger_compute_chunk_size(mask);
 
   /* Allocate a chunk of memory in the dump of the right size. */
   size_t offset_new;
-  char *buff = (char *)dump_get(dump, size, &offset_new);
+  char *buff = (char *)dump_get(&log->dump, size, &offset_new);
 
   /* Write the header. */
-  uint64_t temp = (((uint64_t)(offset_new - *offset)) & 0xffffffffffffffULL) |
-                  ((uint64_t)mask << 56);
-  memcpy(buff, &temp, 8);
-  buff += 8;
+  buff = logger_write_chunk_header(buff, &mask, offset, offset_new);
 
   /* Particle position as three doubles. */
-  if (mask & logger_mask_x) {
-    memcpy(buff, p->x, 3 * sizeof(double));
-    buff += 3 * sizeof(double);
+  if (mask & logger_mask_data[logger_x].mask) {
+    memcpy(buff, p->x, logger_mask_data[logger_x].size);
+    buff += logger_mask_data[logger_x].size;
   }
 
   /* Particle velocity as three floats. */
-  if (mask & logger_mask_v) {
-    memcpy(buff, p->v, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_v].mask) {
+    memcpy(buff, p->v, logger_mask_data[logger_v].size);
+    buff += logger_mask_data[logger_v].size;
   }
 
   /* Particle accelleration as three floats. */
-  if (mask & logger_mask_a) {
-    memcpy(buff, p->a_hydro, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_a].mask) {
+    memcpy(buff, p->a_hydro, logger_mask_data[logger_a].size);
+    buff += logger_mask_data[logger_a].size;
   }
 
 #if defined(GADGET2_SPH)
 
   /* Particle internal energy as a single float. */
-  if (mask & logger_mask_u) {
-    memcpy(buff, &p->entropy, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_u].mask) {
+    memcpy(buff, &p->entropy, logger_mask_data[logger_u].size);
+    buff += logger_mask_data[logger_u].size;
   }
 
   /* Particle smoothing length as a single float. */
-  if (mask & logger_mask_h) {
-    memcpy(buff, &p->h, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_h].mask) {
+    memcpy(buff, &p->h, logger_mask_data[logger_h].size);
+    buff += logger_mask_data[logger_h].size;
   }
 
   /* Particle density as a single float. */
-  if (mask & logger_mask_rho) {
-    memcpy(buff, &p->rho, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_rho].mask) {
+    memcpy(buff, &p->rho, logger_mask_data[logger_rho].size);
+    buff += logger_mask_data[logger_rho].size;
   }
 
   /* Particle constants, which is a bit more complicated. */
-  if (mask & logger_mask_rho) {
+  if (mask & logger_mask_data[logger_consts].mask) {
+    // TODO make it dependent of logger_mask_data
     memcpy(buff, &p->mass, sizeof(float));
     buff += sizeof(float);
     memcpy(buff, &p->id, sizeof(long long));
@@ -171,55 +283,55 @@ void logger_log_part(struct part *p, unsigned int mask, size_t *offset,
 /**
  * @brief Dump a #gpart to the log.
  *
+ * @param log The #logger
  * @param p The #gpart to dump.
  * @param mask The mask of the data to dump.
- * @param offset Pointer to the offset of the previous log of this particle.
- * @param dump The #dump in which to log the particle data.
+ * @param offset Pointer to the offset of the previous log of this particle;
+ * (return) offset of this log.
  */
-void logger_log_gpart(struct gpart *p, unsigned int mask, size_t *offset,
-                      struct dump *dump) {
+void logger_log_gpart(struct logger *log, const struct gpart *p,
+                      unsigned int mask, size_t *offset) {
 
   /* Make sure we're not writing a timestamp. */
-  if (mask & logger_mask_timestamp)
+  if (mask & logger_mask_data[logger_timestamp].mask)
     error("You should not log particles as timestamps.");
 
   /* Make sure we're not looging fields not supported by gparts. */
-  if (mask & (logger_mask_u | logger_mask_rho))
+  if (mask &
+      (logger_mask_data[logger_u].mask | logger_mask_data[logger_rho].mask))
     error("Can't log SPH quantities for gparts.");
 
   /* Start by computing the size of the message. */
-  const int size = logger_size(mask);
+  const int size = logger_compute_chunk_size(mask);
 
   /* Allocate a chunk of memory in the dump of the right size. */
   size_t offset_new;
-  char *buff = (char *)dump_get(dump, size, &offset_new);
+  char *buff = (char *)dump_get(&log->dump, size, &offset_new);
 
   /* Write the header. */
-  uint64_t temp = (((uint64_t)(offset_new - *offset)) & 0xffffffffffffffULL) |
-                  ((uint64_t)mask << 56);
-  memcpy(buff, &temp, 8);
-  buff += 8;
+  buff = logger_write_chunk_header(buff, &mask, offset, offset_new);
 
   /* Particle position as three doubles. */
-  if (mask & logger_mask_x) {
-    memcpy(buff, p->x, 3 * sizeof(double));
-    buff += 3 * sizeof(double);
+  if (mask & logger_mask_data[logger_x].mask) {
+    memcpy(buff, p->x, logger_mask_data[logger_x].size);
+    buff += logger_mask_data[logger_x].size;
   }
 
   /* Particle velocity as three floats. */
-  if (mask & logger_mask_v) {
-    memcpy(buff, p->v_full, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_v].mask) {
+    memcpy(buff, p->v_full, logger_mask_data[logger_v].size);
+    buff += logger_mask_data[logger_v].size;
   }
 
   /* Particle accelleration as three floats. */
-  if (mask & logger_mask_a) {
-    memcpy(buff, p->a_grav, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_a].mask) {
+    memcpy(buff, p->a_grav, logger_mask_data[logger_a].size);
+    buff += logger_mask_data[logger_a].size;
   }
 
   /* Particle constants, which is a bit more complicated. */
-  if (mask & logger_mask_rho) {
+  if (mask & logger_mask_data[logger_consts].mask) {
+    // TODO make it dependent of logger_mask_data
     memcpy(buff, &p->mass, sizeof(float));
     buff += sizeof(float);
     memcpy(buff, &p->id_or_neg_offset, sizeof(long long));
@@ -230,29 +342,191 @@ void logger_log_gpart(struct gpart *p, unsigned int mask, size_t *offset,
   *offset = offset_new;
 }
 
-void logger_log_timestamp(unsigned long long int timestamp, size_t *offset,
-                          struct dump *dump) {
+/**
+ * @brief write a timestamp
+ *
+ * @param log The #logger
+ * @param timestamp time to write
+ * @param time time or scale factor
+ * @param offset Pointer to the offset of the previous log of this particle;
+ * (return) offset of this log.
+ */
+void logger_log_timestamp(struct logger *log, integertime_t timestamp,
+                          double time, size_t *offset) {
+  struct dump *dump = &log->dump;
 
   /* Start by computing the size of the message. */
-  const int size = logger_size(logger_mask_timestamp);
+  const int size =
+      logger_compute_chunk_size(logger_mask_data[logger_timestamp].mask);
 
   /* Allocate a chunk of memory in the dump of the right size. */
   size_t offset_new;
   char *buff = (char *)dump_get(dump, size, &offset_new);
 
   /* Write the header. */
-  uint64_t temp = (((uint64_t)(offset_new - *offset)) & 0xffffffffffffffULL) |
-                  ((uint64_t)logger_mask_timestamp << 56);
-  memcpy(buff, &temp, 8);
-  buff += 8;
+  unsigned int mask = logger_mask_data[logger_timestamp].mask;
+  buff = logger_write_chunk_header(buff, &mask, offset, offset_new);
 
   /* Store the timestamp. */
-  memcpy(buff, &timestamp, sizeof(unsigned long long int));
+  // TODO make it dependent of logger_mask_data
+  memcpy(buff, &timestamp, sizeof(integertime_t));
+  buff += sizeof(integertime_t);
+
+  /* Store the time */
+  memcpy(buff, &time, sizeof(double));
 
   /* Update the log message offset. */
   *offset = offset_new;
 }
 
+/**
+ * @brief Ensure that the buffer is large enough for a step.
+ *
+ * Check if logger parameters are large enough to write all particles
+ * and ensure that enough space is available in the buffer.
+ *
+ * @param log The #logger
+ * @param total_nr_parts total number of part
+ * @param total_nr_gparts total number of gpart
+ * @param total_nr_sparts total number of spart
+ */
+void logger_ensure_size(struct logger *log, size_t total_nr_parts,
+                        size_t total_nr_gparts, size_t total_nr_sparts) {
+
+  /* count part memory */
+  size_t limit = log->max_chunk_size;
+
+  limit *= total_nr_parts;
+
+  /* count gpart memory */
+  if (total_nr_gparts > 0) error("Not implemented");
+
+  /* count spart memory */
+  if (total_nr_sparts > 0) error("Not implemented");
+
+  /* ensure enough space in dump */
+  dump_ensure(&log->dump, limit, log->buffer_scale * limit);
+}
+
+/**
+ * @brief Initialize the logger structure
+ *
+ * @param log The #logger
+ * @param params The #swift_params
+ */
+void logger_init(struct logger *log, struct swift_params *params) {
+  /* read parameters */
+  log->delta_step = parser_get_param_int(params, "Logger:delta_step");
+  size_t buffer_size =
+      parser_get_opt_param_float(params, "Logger:initial_buffer_size", 0.5) *
+      1e9;
+  log->buffer_scale =
+      parser_get_opt_param_float(params, "Logger:buffer_scale", 10);
+  parser_get_param_string(params, "Logger:basename", log->base_name);
+
+  /* set initial value of parameters */
+  log->timestamp_offset = 0;
+
+  /* generate dump filename */
+  char logger_name_file[PARSER_MAX_LINE_SIZE];
+  strcpy(logger_name_file, log->base_name);
+  strcat(logger_name_file, ".dump");
+
+  /* Compute max size for a particle chunk */
+  int max_size = logger_offset_size + logger_mask_size;
+
+  /* Loop over all fields except timestamp */
+  for (int i = 0; i < logger_count_mask - 1; i++) {
+    max_size += logger_mask_data[i].size;
+  }
+  log->max_chunk_size = max_size;
+
+  /* init dump */
+  dump_init(&log->dump, logger_name_file, buffer_size);
+}
+
+/**
+ * @brief Close the dump file and deallocate memory
+ *
+ * @param log The #logger
+ */
+void logger_clean(struct logger *log) { dump_close(&log->dump); }
+
+/**
+ * @brief Write a file header to a logger file
+ *
+ * @param log The #logger
+ * @param e The #engine.
+ *
+ */
+void logger_write_file_header(struct logger *log, const struct engine *e) {
+
+  /* get required variables */
+  struct dump *dump = &log->dump;
+
+  size_t file_offset = dump->file_offset;
+
+  if (file_offset != 0)
+    error(
+        "The logger is not empty."
+        "This function should be called before writing anything in the logger");
+
+  /* Write version information */
+  logger_write_data(dump, &file_offset, logger_version_size, &logger_version);
+
+  /* write offset direction */
+  const int reversed = 0;
+  logger_write_data(dump, &file_offset, logger_number_size, &reversed);
+
+  /* placeholder to write the offset of the first log here */
+  char *skip_header = dump_get(dump, logger_offset_size, &file_offset);
+
+  /* write number of bytes used for names */
+  const int label_size = logger_label_size;
+  logger_write_data(dump, &file_offset, logger_number_size, &label_size);
+
+  /* write number of masks */
+  int count_mask = logger_count_mask;
+  logger_write_data(dump, &file_offset, logger_number_size, &count_mask);
+
+  /* write masks */
+  // loop over all mask type
+  for (int i = 0; i < logger_count_mask; i++) {
+    // mask name
+    logger_write_data(dump, &file_offset, logger_label_size,
+                      &logger_mask_data[i].name);
+
+    // mask size
+    logger_write_data(dump, &file_offset, logger_number_size,
+                      &logger_mask_data[i].size);
+  }
+
+  /* last step: write first offset */
+  memcpy(skip_header, &file_offset, logger_offset_size);
+}
+
+/**
+ * @brief read chunk header
+ *
+ * @param buff The reading buffer
+ * @param mask The mask to read
+ * @param offset (return) the offset pointed by this chunk (absolute)
+ * @param cur_offset The current chunk offset
+ *
+ * @return Number of bytes read
+ */
+__attribute__((always_inline)) INLINE static int logger_read_chunk_header(
+    const char *buff, unsigned int *mask, size_t *offset, size_t cur_offset) {
+  memcpy(mask, buff, logger_mask_size);
+  buff += logger_mask_size;
+
+  *offset = 0;
+  memcpy(offset, buff, logger_offset_size);
+  *offset = cur_offset - *offset;
+
+  return logger_mask_size + logger_offset_size;
+}
+
 /**
  * @brief Read a logger message and store the data in a #part.
  *
@@ -269,56 +543,55 @@ int logger_read_part(struct part *p, size_t *offset, const char *buff) {
   buff = &buff[*offset];
 
   /* Start by reading the logger mask for this entry. */
-  uint64_t temp;
-  memcpy(&temp, buff, 8);
-  const int mask = temp >> 56;
-  *offset -= temp & 0xffffffffffffffULL;
-  buff += 8;
+  const size_t cur_offset = *offset;
+  unsigned int mask = 0;
+  buff += logger_read_chunk_header(buff, &mask, offset, cur_offset);
 
   /* We are only interested in particle data. */
-  if (mask & logger_mask_timestamp)
+  if (mask & logger_mask_data[logger_timestamp].mask)
     error("Trying to read timestamp as particle.");
 
   /* Particle position as three doubles. */
-  if (mask & logger_mask_x) {
-    memcpy(p->x, buff, 3 * sizeof(double));
-    buff += 3 * sizeof(double);
+  if (mask & logger_mask_data[logger_x].mask) {
+    memcpy(p->x, buff, logger_mask_data[logger_x].size);
+    buff += logger_mask_data[logger_x].size;
   }
 
   /* Particle velocity as three floats. */
-  if (mask & logger_mask_v) {
-    memcpy(p->v, buff, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_v].mask) {
+    memcpy(p->v, buff, logger_mask_data[logger_v].size);
+    buff += logger_mask_data[logger_v].size;
   }
 
   /* Particle accelleration as three floats. */
-  if (mask & logger_mask_a) {
-    memcpy(p->a_hydro, buff, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_a].mask) {
+    memcpy(p->a_hydro, buff, logger_mask_data[logger_a].size);
+    buff += logger_mask_data[logger_a].size;
   }
 
 #if defined(GADGET2_SPH)
 
   /* Particle internal energy as a single float. */
-  if (mask & logger_mask_u) {
-    memcpy(&p->entropy, buff, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_u].mask) {
+    memcpy(&p->entropy, buff, logger_mask_data[logger_u].size);
+    buff += logger_mask_data[logger_u].size;
   }
 
   /* Particle smoothing length as a single float. */
-  if (mask & logger_mask_h) {
-    memcpy(&p->h, buff, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_h].mask) {
+    memcpy(&p->h, buff, logger_mask_data[logger_h].size);
+    buff += logger_mask_data[logger_h].size;
   }
 
   /* Particle density as a single float. */
-  if (mask & logger_mask_rho) {
-    memcpy(&p->rho, buff, sizeof(float));
-    buff += sizeof(float);
+  if (mask & logger_mask_data[logger_rho].mask) {
+    memcpy(&p->rho, buff, logger_mask_data[logger_rho].size);
+    buff += logger_mask_data[logger_rho].size;
   }
 
   /* Particle constants, which is a bit more complicated. */
-  if (mask & logger_mask_rho) {
+  if (mask & logger_mask_data[logger_rho].mask) {
+    // TODO make it dependent of logger_mask_data
     memcpy(&p->mass, buff, sizeof(float));
     buff += sizeof(float);
     memcpy(&p->id, buff, sizeof(long long));
@@ -347,40 +620,40 @@ int logger_read_gpart(struct gpart *p, size_t *offset, const char *buff) {
   buff = &buff[*offset];
 
   /* Start by reading the logger mask for this entry. */
-  uint64_t temp;
-  memcpy(&temp, buff, 8);
-  const int mask = temp >> 56;
-  *offset -= temp & 0xffffffffffffffULL;
-  buff += 8;
+  const size_t cur_offset = *offset;
+  unsigned int mask = 0;
+  buff += logger_read_chunk_header(buff, &mask, offset, cur_offset);
 
   /* We are only interested in particle data. */
-  if (mask & logger_mask_timestamp)
+  if (mask & logger_mask_data[logger_timestamp].mask)
     error("Trying to read timestamp as particle.");
 
   /* We can't store all part fields in a gpart. */
-  if (mask & (logger_mask_u | logger_mask_rho))
+  if (mask &
+      (logger_mask_data[logger_u].mask | logger_mask_data[logger_rho].mask))
     error("Trying to read SPH quantities into a gpart.");
 
   /* Particle position as three doubles. */
-  if (mask & logger_mask_x) {
-    memcpy(p->x, buff, 3 * sizeof(double));
-    buff += 3 * sizeof(double);
+  if (mask & logger_mask_data[logger_x].mask) {
+    memcpy(p->x, buff, logger_mask_data[logger_x].size);
+    buff += logger_mask_data[logger_x].size;
   }
 
   /* Particle velocity as three floats. */
-  if (mask & logger_mask_v) {
-    memcpy(p->v_full, buff, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_v].mask) {
+    memcpy(p->v_full, buff, logger_mask_data[logger_v].size);
+    buff += logger_mask_data[logger_v].size;
   }
 
   /* Particle accelleration as three floats. */
-  if (mask & logger_mask_a) {
-    memcpy(p->a_grav, buff, 3 * sizeof(float));
-    buff += 3 * sizeof(float);
+  if (mask & logger_mask_data[logger_a].mask) {
+    memcpy(p->a_grav, buff, logger_mask_data[logger_a].size);
+    buff += logger_mask_data[logger_a].size;
   }
 
   /* Particle constants, which is a bit more complicated. */
-  if (mask & logger_mask_rho) {
+  if (mask & logger_mask_data[logger_rho].mask) {
+    // TODO make it dependent of logger_mask_data
     memcpy(&p->mass, buff, sizeof(float));
     buff += sizeof(float);
     memcpy(&p->id_or_neg_offset, buff, sizeof(long long));
@@ -401,32 +674,37 @@ int logger_read_gpart(struct gpart *p, size_t *offset, const char *buff) {
  *
  * @return The mask containing the values read.
  */
-int logger_read_timestamp(unsigned long long int *t, size_t *offset,
-                          const char *buff) {
+int logger_read_timestamp(unsigned long long int *t, double *time,
+                          size_t *offset, const char *buff) {
 
   /* Jump to the offset. */
   buff = &buff[*offset];
 
   /* Start by reading the logger mask for this entry. */
-  uint64_t temp;
-  memcpy(&temp, buff, 8);
-  const int mask = temp >> 56;
-  *offset -= temp & 0xffffffffffffffULL;
-  buff += 8;
+  const size_t cur_offset = *offset;
+  unsigned int mask = 0;
+  buff += logger_read_chunk_header(buff, &mask, offset, cur_offset);
 
   /* We are only interested in timestamps. */
-  if (!(mask & logger_mask_timestamp))
+  if (!(mask & logger_mask_data[logger_timestamp].mask))
     error("Trying to read timestamp from a particle.");
 
   /* Make sure we don't have extra fields. */
-  if (mask != logger_mask_timestamp)
+  if (mask != logger_mask_data[logger_timestamp].mask)
     error("Timestamp message contains extra fields.");
 
   /* Copy the timestamp value from the buffer. */
+  // TODO make it dependent of logger_mask_data
   memcpy(t, buff, sizeof(unsigned long long int));
+  buff += sizeof(unsigned long long int);
+
+  /* Copy the time value from the buffer. */
+  memcpy(time, buff, sizeof(double));
 
   /* Finally, return the mask of the values we just read. */
   return mask;
 }
 
+#endif /* WITH_LOGGER */
+
 #endif /* HAVE_POSIX_FALLOCATE */
diff --git a/src/logger.h b/src/logger.h
index 596c0903750404d0934e0d3843a5461523700e9e..56e2c8ab94c66b24df1800877bb9cfb129c3e645 100644
--- a/src/logger.h
+++ b/src/logger.h
@@ -19,11 +19,21 @@
 #ifndef SWIFT_LOGGER_H
 #define SWIFT_LOGGER_H
 
+#ifdef WITH_LOGGER
+
 /* Includes. */
-#include "part.h"
+#include "common_io.h"
+#include "dump.h"
+#include "inline.h"
+#include "timeline.h"
+#include "units.h"
 
 /* Forward declaration */
 struct dump;
+struct gpart;
+struct part;
+/* TODO remove dependency */
+struct engine;
 
 /**
  * Logger entries contain messages representing the particle data at a given
@@ -59,31 +69,111 @@ struct dump;
  * The offset refers to the relative location of the previous message for the
  * same particle or for the previous timestamp (if mask bit 7 is set). I.e.
  * the previous log entry will be at the address of the current mask byte minus
- * the unsigned value stored in the offset. An offset of zero indicates that
- * this is the first message for the given particle/timestamp.
+ * the unsigned value stored in the offset. An offset equal to the chunk offset
+ * indicates that this is the first message for the given particle/timestamp.
  */
 
 /* Some constants. */
-#define logger_mask_x 1
-#define logger_mask_v 2
-#define logger_mask_a 4
-#define logger_mask_u 8
-#define logger_mask_h 16
-#define logger_mask_rho 32
-#define logger_mask_consts 64
-#define logger_mask_timestamp 128
+enum logger_masks_number {
+  logger_x = 0,
+  logger_v = 1,
+  logger_a = 2,
+  logger_u = 3,
+  logger_h = 4,
+  logger_rho = 5,
+  logger_consts = 6,
+  logger_timestamp = 7,  /* expect it to be before count */
+  logger_count_mask = 8, /* Need to be the last */
+} __attribute__((packed));
+
+struct mask_data {
+  /* Number of bytes for a mask */
+  int size;
+  /* Mask value */
+  unsigned int mask;
+  /* name of the mask */
+  char name[100];
+};
+
+extern const struct mask_data logger_mask_data[logger_count_mask];
+
+/* Size of the strings. */
+#define logger_string_length 200
+
+/* structure containing global data */
+struct logger {
+  /* Number of particle steps between dumping a chunk of data */
+  short int delta_step;
+
+  /* Logger basename */
+  char base_name[logger_string_length];
+
+  /* Dump file */
+  struct dump dump;
+
+  /* timestamp offset for logger*/
+  size_t timestamp_offset;
+
+  /* scaling factor when buffer is too small */
+  float buffer_scale;
+
+  /* Size of a chunk if every mask are activated */
+  int max_chunk_size;
+
+} SWIFT_STRUCT_ALIGN;
+
+/* required structure for each particle type */
+struct logger_part_data {
+  /* Number of particle updates since last output */
+  int steps_since_last_output;
+
+  /* offset of last particle log entry */
+  size_t last_offset;
+};
 
 /* Function prototypes. */
-int logger_size(unsigned int mask);
-void logger_log_part(struct part *p, unsigned int mask, size_t *offset,
-                     struct dump *dump);
-void logger_log_gpart(struct gpart *p, unsigned int mask, size_t *offset,
-                      struct dump *dump);
-void logger_log_timestamp(unsigned long long int t, size_t *offset,
-                          struct dump *dump);
+int logger_compute_chunk_size(unsigned int mask);
+void logger_log_all(struct logger *log, const struct engine *e);
+void logger_log_part(struct logger *log, const struct part *p,
+                     unsigned int mask, size_t *offset);
+void logger_log_gpart(struct logger *log, const struct gpart *p,
+                      unsigned int mask, size_t *offset);
+void logger_init(struct logger *log, struct swift_params *params);
+void logger_clean(struct logger *log);
+void logger_log_timestamp(struct logger *log, integertime_t t, double time,
+                          size_t *offset);
+void logger_ensure_size(struct logger *log, size_t total_nr_parts,
+                        size_t total_nr_gparts, size_t total_nr_sparts);
+void logger_write_file_header(struct logger *log, const struct engine *e);
+
 int logger_read_part(struct part *p, size_t *offset, const char *buff);
 int logger_read_gpart(struct gpart *p, size_t *offset, const char *buff);
-int logger_read_timestamp(unsigned long long int *t, size_t *offset,
-                          const char *buff);
+int logger_read_timestamp(unsigned long long int *t, double *time,
+                          size_t *offset, const char *buff);
+
+/**
+ * @brief Initialize the logger data for a particle.
+ *
+ * @param logger The #logger_part_data.
+ */
+INLINE static void logger_part_data_init(struct logger_part_data *logger) {
+  logger->last_offset = 0;
+  logger->steps_since_last_output = INT_MAX;
+}
+
+/**
+ * @brief Should this particle write its data now ?
+ *
+ * @param xp The #xpart.
+ * @param e The #engine containing information about the current time.
+ * @return 1 if the #part should write, 0 otherwise.
+ */
+__attribute__((always_inline)) INLINE static int logger_should_write(
+    const struct logger_part_data *logger_data, const struct logger *log) {
+
+  return (logger_data->steps_since_last_output > log->delta_step);
+}
+
+#endif /* WITH_LOGGER */
 
 #endif /* SWIFT_LOGGER_H */
diff --git a/src/logger_io.c b/src/logger_io.c
new file mode 100644
index 0000000000000000000000000000000000000000..3cef3497b2912411cea6763f5418bc76a7f5ece0
--- /dev/null
+++ b/src/logger_io.c
@@ -0,0 +1,299 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
+ *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+#ifdef WITH_LOGGER
+
+/* Some standard headers. */
+#include <hdf5.h>
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* This object's header. */
+#include "logger_io.h"
+
+/* Local includes. */
+#include "chemistry_io.h"
+#include "common_io.h"
+#include "cooling.h"
+#include "dimension.h"
+#include "engine.h"
+#include "error.h"
+#include "gravity_io.h"
+#include "gravity_properties.h"
+#include "hydro_io.h"
+#include "hydro_properties.h"
+#include "io_properties.h"
+#include "kernel_hydro.h"
+#include "parallel_io.h"
+#include "part.h"
+#include "serial_io.h"
+#include "single_io.h"
+#include "stars_io.h"
+#include "units.h"
+#include "xmf.h"
+
+/**
+ * @brief Writes an HDF5 index file
+ *
+ * @param e The engine containing all the system.
+ * @param baseName The common part of the snapshot file name.
+ * @param internal_units The #unit_system used internally
+ * @param snapshot_units The #unit_system used in the snapshots
+ *
+ * Creates an HDF5 output file and writes the offset and id of particles
+ * contained in the engine. If such a file already exists, it is erased and
+ * replaced by the new one.
+ *
+ * Calls #error() if an error occurs.
+ *
+ */
+void write_index_single(struct engine* e, const char* baseName,
+                        const struct unit_system* internal_units,
+                        const struct unit_system* snapshot_units) {
+
+  hid_t h_file = 0, h_grp = 0;
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  const size_t Ntot = e->s->nr_gparts;
+  const int periodic = e->s->periodic;
+  int numFiles = 1;
+  struct part* parts = e->s->parts;
+  struct xpart* xparts = e->s->xparts;
+  // struct gpart* gparts = e->s->gparts;
+  struct gpart* dmparts = NULL;
+  // struct spart* sparts = e->s->sparts;
+  static int outputCount = 0;
+
+  struct logger* log = e->logger;
+
+  /* Number of unassociated gparts */
+  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
+
+  long long N_total[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
+
+  /* File name */
+  char fileName[FILENAME_BUFFER_SIZE];
+  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+           outputCount);
+
+  /* Open file */
+  /* message("Opening file '%s'.", fileName); */
+  h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_file < 0) {
+    error("Error while opening file '%s'.", fileName);
+  }
+
+  /* Open header to write simulation properties */
+  /* message("Writing runtime parameters..."); */
+  h_grp =
+      H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_grp < 0) error("Error while creating runtime parameters group\n");
+
+  /* Write the relevant information */
+  io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);
+
+  /* Close runtime parameters */
+  H5Gclose(h_grp);
+
+  /* Open header to write simulation properties */
+  /* message("Writing file header..."); */
+  h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_grp < 0) error("Error while creating file header\n");
+
+  /* Print the relevant information and print status */
+  io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
+  double dblTime = e->time;
+  io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
+  io_write_attribute(h_grp, "Time Offset", UINT, &log->timestamp_offset, 1);
+  int dimension = (int)hydro_dimension;
+  io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
+
+  /* GADGET-2 legacy values */
+  /* Number of particles of each type */
+  unsigned int numParticles[swift_type_count] = {0};
+  unsigned int numParticlesHighWord[swift_type_count] = {0};
+  for (int ptype = 0; ptype < swift_type_count; ++ptype) {
+    numParticles[ptype] = (unsigned int)N_total[ptype];
+    numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
+  }
+  io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total,
+                     swift_type_count);
+  io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
+                     swift_type_count);
+  io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT,
+                     numParticlesHighWord, swift_type_count);
+  double MassTable[swift_type_count] = {0};
+  io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count);
+  unsigned int flagEntropy[swift_type_count] = {0};
+  flagEntropy[0] = writeEntropyFlag();
+  io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy,
+                     swift_type_count);
+  io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);
+
+  /* Close header */
+  H5Gclose(h_grp);
+
+  /* Print the code version */
+  io_write_code_description(h_file);
+
+  /* Print the SPH parameters */
+  if (e->policy & engine_policy_hydro) {
+    h_grp = H5Gcreate(h_file, "/HydroScheme", H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0) error("Error while creating SPH group");
+    hydro_props_print_snapshot(h_grp, e->hydro_properties);
+    hydro_write_flavour(h_grp);
+    H5Gclose(h_grp);
+  }
+
+  /* Print the gravity parameters */
+  if (e->policy & engine_policy_self_gravity) {
+    h_grp = H5Gcreate(h_file, "/GravityScheme", H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0) error("Error while creating gravity group");
+    gravity_props_print_snapshot(h_grp, e->gravity_properties);
+    H5Gclose(h_grp);
+  }
+
+  /* Print the runtime parameters */
+  h_grp =
+      H5Gcreate(h_file, "/Parameters", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_grp < 0) error("Error while creating parameters group");
+  parser_write_params_to_hdf5(e->parameter_file, h_grp, 1);
+  H5Gclose(h_grp);
+
+  /* Print the runtime unused parameters */
+  h_grp = H5Gcreate(h_file, "/UnusedParameters", H5P_DEFAULT, H5P_DEFAULT,
+                    H5P_DEFAULT);
+  if (h_grp < 0) error("Error while creating parameters group");
+  parser_write_params_to_hdf5(e->parameter_file, h_grp, 0);
+  H5Gclose(h_grp);
+
+  /* Print the system of Units used in the snapshot */
+  io_write_unit_system(h_file, snapshot_units, "Units");
+
+  /* Print the system of Units used internally */
+  io_write_unit_system(h_file, internal_units, "InternalCodeUnits");
+
+  /* Tell the user if a conversion will be needed */
+  if (e->verbose) {
+    if (units_are_equal(snapshot_units, internal_units)) {
+
+      message("Snapshot and internal units match. No conversion needed.");
+
+    } else {
+
+      message("Conversion needed from:");
+      message("(Snapshot) Unit system: U_M =      %e g.",
+              snapshot_units->UnitMass_in_cgs);
+      message("(Snapshot) Unit system: U_L =      %e cm.",
+              snapshot_units->UnitLength_in_cgs);
+      message("(Snapshot) Unit system: U_t =      %e s.",
+              snapshot_units->UnitTime_in_cgs);
+      message("(Snapshot) Unit system: U_I =      %e A.",
+              snapshot_units->UnitCurrent_in_cgs);
+      message("(Snapshot) Unit system: U_T =      %e K.",
+              snapshot_units->UnitTemperature_in_cgs);
+      message("to:");
+      message("(internal) Unit system: U_M = %e g.",
+              internal_units->UnitMass_in_cgs);
+      message("(internal) Unit system: U_L = %e cm.",
+              internal_units->UnitLength_in_cgs);
+      message("(internal) Unit system: U_t = %e s.",
+              internal_units->UnitTime_in_cgs);
+      message("(internal) Unit system: U_I = %e A.",
+              internal_units->UnitCurrent_in_cgs);
+      message("(internal) Unit system: U_T = %e K.",
+              internal_units->UnitTemperature_in_cgs);
+    }
+  }
+
+  /* Loop over all particle types */
+  for (int ptype = 0; ptype < swift_type_count; ptype++) {
+
+    /* Don't do anything if no particle of this kind */
+    if (numParticles[ptype] == 0) continue;
+
+    /* Open the particle group in the file */
+    char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
+    snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
+             ptype);
+    h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0) {
+      error("Error while creating particle group.\n");
+    }
+
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N = 0;
+
+    /* Write particle fields from the particle structure */
+    switch (ptype) {
+
+      case swift_type_gas:
+        N = Ngas;
+        hydro_write_index(parts, xparts, list, &num_fields);
+        break;
+
+      case swift_type_dark_matter:
+        error("TODO");
+        break;
+
+      case swift_type_stars:
+        N = Nstars;
+        error("TODO");
+        // star_write_index(sparts, list, &num_fields);
+        break;
+
+      default:
+        error("Particle Type %d not yet supported. Aborting", ptype);
+    }
+
+    /* Write everything */
+    for (int i = 0; i < num_fields; ++i)
+      writeArray(e, h_grp, fileName, NULL, partTypeGroupName, list[i], N,
+                 internal_units, snapshot_units);
+
+    /* Free temporary array */
+    if (dmparts) {
+      free(dmparts);
+      dmparts = NULL;
+    }
+
+    /* Close particle group */
+    H5Gclose(h_grp);
+  }
+
+  /* message("Done writing particles..."); */
+
+  /* Close file */
+  H5Fclose(h_file);
+
+  ++outputCount;
+}
+
+#endif /* WITH_LOGGER */
diff --git a/src/logger_io.h b/src/logger_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5b1274fb7b957d5b48bc8425bf784c586ac6a08
--- /dev/null
+++ b/src/logger_io.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2012 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_LOGGER_IO_H
+#define SWIFT_LOGGER_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+#ifdef WITH_LOGGER
+
+/* Includes. */
+#include "engine.h"
+#include "io_properties.h"
+#include "part.h"
+#include "units.h"
+
+void write_index_single(struct engine* e, const char* baseName,
+                        const struct unit_system* internal_units,
+                        const struct unit_system* snapshot_units);
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
+ *
+ * In this version, we only want the ids and the offset.
+ */
+__attribute__((always_inline)) INLINE static void hydro_write_index(
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
+    int* num_fields) {
+
+  *num_fields = 2;
+
+  /* List what we want to write */
+  list[0] = io_make_output_field("ParticleIDs", ULONGLONG, 1,
+                                 UNIT_CONV_NO_UNITS, parts, id);
+
+  list[1] = io_make_output_field("Offset", ULONGLONG, 1, UNIT_CONV_NO_UNITS,
+                                 xparts, logger_data.last_offset);
+}
+#endif
+
+#endif /* SWIFT_LOGGER_IO_H */
diff --git a/src/map.c b/src/map.c
index b0a3117388cb77f4311cf7ee3c5d62a0937da655..68c3618fcdb10a618a97e5d1a2565d58db677cdb 100644
--- a/src/map.c
+++ b/src/map.c
@@ -73,9 +73,9 @@ void map_cells_plot(struct cell *c, void *data) {
     printf("%.16e %.16e %.16e\n\n\n", l[0] + h[0], l[1] + h[1], l[2]);
 
     if (!c->split) {
-      for (int k = 0; k < c->count; k++)
-        printf("0 0 0 %.16e %.16e %.16e\n", c->parts[k].x[0], c->parts[k].x[1],
-               c->parts[k].x[2]);
+      for (int k = 0; k < c->hydro.count; k++)
+        printf("0 0 0 %.16e %.16e %.16e\n", c->hydro.parts[k].x[0],
+               c->hydro.parts[k].x[1], c->hydro.parts[k].x[2]);
       printf("\n\n");
     }
     /* else
@@ -102,11 +102,11 @@ void map_check(struct part *p, struct cell *c, void *data) {
 void map_cellcheck(struct cell *c, void *data) {
 
   int *count = (int *)data;
-  atomic_add(count, c->count);
+  atomic_add(count, c->hydro.count);
 
   /* Loop over all parts and check if they are in the cell. */
-  for (int k = 0; k < c->count; k++) {
-    struct part *p = &c->parts[k];
+  for (int k = 0; k < c->hydro.count; k++) {
+    struct part *p = &c->hydro.parts[k];
     if (p->x[0] < c->loc[0] || p->x[1] < c->loc[1] || p->x[2] < c->loc[2] ||
         p->x[0] > c->loc[0] + c->width[0] ||
         p->x[1] > c->loc[1] + c->width[1] ||
@@ -122,8 +122,8 @@ void map_cellcheck(struct cell *c, void *data) {
   }
 
   /* Loop over all gparts and check if they are in the cell. */
-  for (int k = 0; k < c->gcount; k++) {
-    struct gpart *p = &c->gparts[k];
+  for (int k = 0; k < c->grav.count; k++) {
+    struct gpart *p = &c->grav.parts[k];
     if (p->x[0] < c->loc[0] || p->x[1] < c->loc[1] || p->x[2] < c->loc[2] ||
         p->x[0] > c->loc[0] + c->width[0] ||
         p->x[1] > c->loc[1] + c->width[1] ||
@@ -191,6 +191,13 @@ void map_h_max(struct part *p, struct cell *c, void *data) {
   if (p->h > (*p2)->h) *p2 = p;
 }
 
+void map_stars_h_max(struct spart *p, struct cell *c, void *data) {
+
+  struct spart **p2 = (struct spart **)data;
+
+  if (p->h > (*p2)->h) *p2 = p;
+}
+
 /**
  * @brief Mapping function for neighbour count.
  */
diff --git a/src/map.h b/src/map.h
index 950a5fd96ebdc7177b41912b1565163f33de8701..6ad05e30df0644e1ee37b1b912bc11681ccf837c 100644
--- a/src/map.h
+++ b/src/map.h
@@ -34,6 +34,7 @@ void map_wcount_min(struct part *p, struct cell *c, void *data);
 void map_wcount_max(struct part *p, struct cell *c, void *data);
 void map_h_min(struct part *p, struct cell *c, void *data);
 void map_h_max(struct part *p, struct cell *c, void *data);
+void map_stars_h_max(struct spart *p, struct cell *c, void *data);
 void map_icount(struct part *p, struct cell *c, void *data);
 void map_dump(struct part *p, struct cell *c, void *data);
 
diff --git a/src/memswap.h b/src/memswap.h
index 2f7b9215ed48535fab9e8331303457c2f92859cd..330173100f41b80fcc65c9fce01838b5de8e778f 100644
--- a/src/memswap.h
+++ b/src/memswap.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  * This file is part of SWIFT.
  * Copyright (c) 2016 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
- *
+ *               2018 STFC (author email aidan.chalk@stfc.ac.uk)
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published
  * by the Free Software Foundation, either version 3 of the License, or
@@ -20,6 +20,7 @@
 #define SWIFT_MEMSWAP_H
 
 /* Config parameters. */
+#include <stdint.h>
 #include "../config.h"
 
 #ifdef HAVE_IMMINTRIN_H
@@ -33,7 +34,7 @@
 #endif
 
 /* Macro for in-place swap of two values a and b of type t. a and b are
-   assumed to be of type char* so that the pointer arithmetic works. */
+   assumed to be of type int8_t* so that the pointer arithmetic works. */
 #define swap_loop(type, a, b, count) \
   while (count >= sizeof(type)) {    \
     register type temp = *(type *)a; \
@@ -60,9 +61,10 @@
  * @param void_b Pointer to the second element.
  * @param bytes Size, in bytes, of the data pointed to by @c a and @c b.
  */
-__attribute__((always_inline)) inline void memswap(void *void_a, void *void_b,
+__attribute__((always_inline)) inline void memswap(void *restrict void_a,
+                                                   void *restrict void_b,
                                                    size_t bytes) {
-  char *a = (char *)void_a, *b = (char *)void_b;
+  int8_t *restrict a = (int8_t *)void_a, *restrict b = (int8_t *)void_b;
 #if defined(__AVX512F__) && defined(__INTEL_COMPILER)
   swap_loop(__m512i, a, b, bytes);
 #endif
@@ -75,10 +77,17 @@ __attribute__((always_inline)) inline void memswap(void *void_a, void *void_b,
 #ifdef __ALTIVEC__
   swap_loop(vector int, a, b, bytes);
 #endif
-  swap_loop(size_t, a, b, bytes);
-  swap_loop(int, a, b, bytes);
-  swap_loop(short, a, b, bytes);
-  swap_loop(char, a, b, bytes);
+  swap_loop(int_least64_t, a, b, bytes);
+  swap_loop(int_least32_t, a, b, bytes);
+  swap_loop(int_least16_t, a, b, bytes);
+  swap_loop(int_least8_t, a, b, bytes);
+
+  /* This is a known bug for the current version of clang on ARM.
+   * We add this synchronization as a temporary bug fix.
+   * See https://bugs.llvm.org/show_bug.cgi?id=40051 */
+#if defined(__clang__) && defined(__aarch64__)
+  __sync_synchronize();
+#endif
 }
 
 /**
@@ -93,10 +102,9 @@ __attribute__((always_inline)) inline void memswap(void *void_a, void *void_b,
  * @param void_b Pointer to the second element.
  * @param bytes Size, in bytes, of the data pointed to by @c a and @c b.
  */
-__attribute__((always_inline)) inline void memswap_unaligned(void *void_a,
-                                                             void *void_b,
-                                                             size_t bytes) {
-  char *a = (char *)void_a, *b = (char *)void_b;
+__attribute__((always_inline)) inline void memswap_unaligned(
+    void *restrict void_a, void *restrict void_b, size_t bytes) {
+  int8_t *restrict a = (int8_t *)void_a, *restrict b = (int8_t *)void_b;
 #ifdef __AVX512F__
   while (bytes >= sizeof(__m512i)) {
     register __m512i temp;
@@ -134,10 +142,17 @@ __attribute__((always_inline)) inline void memswap_unaligned(void *void_a,
   // Power8 supports unaligned load/stores, but not sure what it will do here.
   swap_loop(vector int, a, b, bytes);
 #endif
-  swap_loop(size_t, a, b, bytes);
-  swap_loop(int, a, b, bytes);
-  swap_loop(short, a, b, bytes);
-  swap_loop(char, a, b, bytes);
+  swap_loop(int_least64_t, a, b, bytes);
+  swap_loop(int_least32_t, a, b, bytes);
+  swap_loop(int_least16_t, a, b, bytes);
+  swap_loop(int_least8_t, a, b, bytes);
+
+  /* This is a known bug for the current version of clang on ARM.
+   * We add this synchronization as a temporary bug fix.
+   * See https://bugs.llvm.org/show_bug.cgi?id=40051 */
+#if defined(__clang__) && defined(__aarch64__)
+  __sync_synchronize();
+#endif
 }
 
 #endif /* SWIFT_MEMSWAP_H */
diff --git a/src/mesh_gravity.c b/src/mesh_gravity.c
index 2359b8a9cdf785bce719a1d0379d177d00328b9e..e7005b083c94e20f5218923e443f71464ab383e1 100644
--- a/src/mesh_gravity.c
+++ b/src/mesh_gravity.c
@@ -110,14 +110,22 @@ __attribute__((always_inline)) INLINE static void CIC_set(
     double dx, double dy, double dz, double value) {
 
   /* Classic CIC interpolation */
-  mesh[row_major_id_periodic(i + 0, j + 0, k + 0, N)] += value * tx * ty * tz;
-  mesh[row_major_id_periodic(i + 0, j + 0, k + 1, N)] += value * tx * ty * dz;
-  mesh[row_major_id_periodic(i + 0, j + 1, k + 0, N)] += value * tx * dy * tz;
-  mesh[row_major_id_periodic(i + 0, j + 1, k + 1, N)] += value * tx * dy * dz;
-  mesh[row_major_id_periodic(i + 1, j + 0, k + 0, N)] += value * dx * ty * tz;
-  mesh[row_major_id_periodic(i + 1, j + 0, k + 1, N)] += value * dx * ty * dz;
-  mesh[row_major_id_periodic(i + 1, j + 1, k + 0, N)] += value * dx * dy * tz;
-  mesh[row_major_id_periodic(i + 1, j + 1, k + 1, N)] += value * dx * dy * dz;
+  atomic_add_d(&mesh[row_major_id_periodic(i + 0, j + 0, k + 0, N)],
+               value * tx * ty * tz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 0, j + 0, k + 1, N)],
+               value * tx * ty * dz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 0, j + 1, k + 0, N)],
+               value * tx * dy * tz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 0, j + 1, k + 1, N)],
+               value * tx * dy * dz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 1, j + 0, k + 0, N)],
+               value * dx * ty * tz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 1, j + 0, k + 1, N)],
+               value * dx * ty * dz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 1, j + 1, k + 0, N)],
+               value * dx * dy * tz);
+  atomic_add_d(&mesh[row_major_id_periodic(i + 1, j + 1, k + 1, N)],
+               value * dx * dy * dz);
 }
 
 /**
@@ -165,6 +173,74 @@ INLINE static void gpart_to_mesh_CIC(const struct gpart* gp, double* rho, int N,
   CIC_set(rho, N, i, j, k, tx, ty, tz, dx, dy, dz, mass);
 }
 
+/**
+ * @brief Assigns all the #gpart of a #cell to a density mesh using the CIC
+ * method.
+ *
+ * @param c The #cell.
+ * @param rho The density mesh.
+ * @param N the size of the mesh along one axis.
+ * @param fac The width of a mesh cell.
+ * @param dim The dimensions of the simulation box.
+ */
+void cell_gpart_to_mesh_CIC(const struct cell* c, double* rho, int N,
+                            double fac, const double dim[3]) {
+  const int gcount = c->grav.count;
+  const struct gpart* gparts = c->grav.parts;
+
+  /* Assign all the gpart of that cell to the mesh */
+  for (int i = 0; i < gcount; ++i)
+    gpart_to_mesh_CIC(&gparts[i], rho, N, fac, dim);
+}
+
+/**
+ * @brief Shared information about the mesh to be used by all the threads in the
+ * pool.
+ */
+struct cic_mapper_data {
+  const struct cell* cells;
+  double* rho;
+  int N;
+  double fac;
+  double dim[3];
+};
+
+/**
+ * @brief Threadpool mapper function for the mesh CIC assignment of a cell.
+ *
+ * @param map_data A chunk of the list of local cells.
+ * @param num The number of cells in the chunk.
+ * @param extra The information about the mesh and cells.
+ */
+void cell_gpart_to_mesh_CIC_mapper(void* map_data, int num, void* extra) {
+
+  /* Unpack the shared information */
+  const struct cic_mapper_data* data = (struct cic_mapper_data*)extra;
+  const struct cell* cells = data->cells;
+  double* rho = data->rho;
+  const int N = data->N;
+  const double fac = data->fac;
+  const double dim[3] = {data->dim[0], data->dim[1], data->dim[2]};
+
+  /* Pointer to the chunk to be processed */
+  int* local_cells = (int*)map_data;
+
+  // MATTHIEU: This could in principle be improved by creating a local mesh
+  //           with just the extent required for the cell. Assignment can
+  //           then be done without atomics. That local mesh is then added
+  //           atomically to the global one.
+
+  /* Loop over the elements assigned to this thread */
+  for (int i = 0; i < num; ++i) {
+
+    /* Pointer to local cell */
+    const struct cell* c = &cells[local_cells[i]];
+
+    /* Assign this cell's content to the mesh */
+    cell_gpart_to_mesh_CIC(c, rho, N, fac, dim);
+  }
+}
+
 /**
  * @brief Computes the potential on a gpart from a given mesh using the CIC
  * method.
@@ -279,18 +355,24 @@ void mesh_to_gparts_CIC(struct gpart* gp, const double* pot, int N, double fac,
  *
  * @param mesh The #pm_mesh used to store the potential.
  * @param s The #space containing the particles.
+ * @param tp The #threadpool object used for parallelisation.
  * @param verbose Are we talkative?
  */
 void pm_mesh_compute_potential(struct pm_mesh* mesh, const struct space* s,
-                               int verbose) {
+                               struct threadpool* tp, int verbose) {
 
 #ifdef HAVE_FFTW
 
   const double r_s = mesh->r_s;
   const double box_size = s->dim[0];
   const double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
+  const int* local_cells = s->local_cells_top;
+  const int nr_local_cells = s->nr_local_cells;
 
   if (r_s <= 0.) error("Invalid value of a_smooth");
+  if (mesh->dim[0] != dim[0] || mesh->dim[1] != dim[1] ||
+      mesh->dim[2] != dim[2])
+    error("Domain size does not match the value stored in the space.");
 
   /* Some useful constants */
   const int N = mesh->N;
@@ -314,30 +396,61 @@ void pm_mesh_compute_potential(struct pm_mesh* mesh, const struct space* s,
   fftw_plan inverse_plan = fftw_plan_dft_c2r_3d(
       N, N, N, frho, rho, FFTW_ESTIMATE | FFTW_DESTROY_INPUT);
 
-  const ticks tic = getticks();
+  ticks tic = getticks();
 
   /* Zero everything */
   bzero(rho, N * N * N * sizeof(double));
 
-  /* Do a CIC mesh assignment of the gparts */
-  for (size_t i = 0; i < s->nr_gparts; ++i)
-    gpart_to_mesh_CIC(&s->gparts[i], rho, N, cell_fac, dim);
+  /* Gather the mesh shared information to be used by the threads */
+  struct cic_mapper_data data;
+  data.cells = s->cells_top;
+  data.rho = rho;
+  data.N = N;
+  data.fac = cell_fac;
+  data.dim[0] = dim[0];
+  data.dim[1] = dim[1];
+  data.dim[2] = dim[2];
+
+  /* Do a parallel CIC mesh assignment of the gparts but only using
+     the local top-level cells */
+  threadpool_map(tp, cell_gpart_to_mesh_CIC_mapper, (void*)local_cells,
+                 nr_local_cells, sizeof(int), 0, (void*)&data);
 
   if (verbose)
-    message("gpart assignment took %.3f %s.",
+    message("Gpart assignment took %.3f %s.",
             clocks_from_ticks(getticks() - tic), clocks_getunit());
 
+#ifdef WITH_MPI
+
+  MPI_Barrier(MPI_COMM_WORLD);
+  tic = getticks();
+
+  /* Merge everybody's share of the density mesh */
+  MPI_Allreduce(MPI_IN_PLACE, rho, N * N * N, MPI_DOUBLE, MPI_SUM,
+                MPI_COMM_WORLD);
+
+  if (verbose)
+    message("Mesh communication took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
+
   /* message("\n\n\n DENSITY"); */
   /* print_array(rho, N); */
 
-  const ticks tic2 = getticks();
+  tic = getticks();
 
   /* Fourier transform to go to magic-land */
   fftw_execute(forward_plan);
 
+  if (verbose)
+    message("Forward Fourier transform took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
   /* frho now contains the Fourier transform of the density field */
   /* frho contains NxNx(N/2+1) complex numbers */
 
+  tic = getticks();
+
   /* Some common factors */
   const double green_fac = -1. / (M_PI * box_size);
   const double a_smooth2 = 4. * M_PI * M_PI * r_s * r_s / (box_size * box_size);
@@ -399,18 +512,25 @@ void pm_mesh_compute_potential(struct pm_mesh* mesh, const struct space* s,
   frho[0][0] = 0.;
   frho[0][1] = 0.;
 
+  if (verbose)
+    message("Applying Green function took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+
   /* Fourier transform to come back from magic-land */
   fftw_execute(inverse_plan);
 
+  if (verbose)
+    message("Backwards Fourier transform took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
   /* rho now contains the potential */
   /* This array is now again NxNxN real numbers */
+
   /* Let's store it in the structure */
   mesh->potential = rho;
 
-  if (verbose)
-    message("Fourier-space PM took %.3f %s.",
-            clocks_from_ticks(getticks() - tic2), clocks_getunit());
-
   /* message("\n\n\n POTENTIAL"); */
   /* print_array(potential, N); */
 
@@ -450,8 +570,20 @@ void pm_mesh_interpolate_forces(const struct pm_mesh* mesh,
   for (int i = 0; i < gcount; ++i) {
     struct gpart* gp = &gparts[i];
 
-    if (gpart_is_active(gp, e))
+    if (gpart_is_active(gp, e)) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check that particles have been drifted to the current time */
+      if (gp->ti_drift != e->ti_current)
+        error("gpart not drifted to current time");
+
+      /* Check that the particle was initialised */
+      if (gp->initialised == 0)
+        error("Adding forces to an un-initialised gpart.");
+#endif
+
       mesh_to_gparts_CIC(gp, potential, N, cell_fac, dim);
+    }
   }
 #else
   error("No FFTW library found. Cannot compute periodic long-range forces.");
@@ -464,18 +596,20 @@ void pm_mesh_interpolate_forces(const struct pm_mesh* mesh,
  * @param mesh The #pm_mesh to initialise.
  * @param props The propoerties of the gravity scheme.
  * @param dim The (comoving) side-lengths of the simulation volume.
+ * @param nr_threads The number of threads on this MPI rank.
  */
 void pm_mesh_init(struct pm_mesh* mesh, const struct gravity_props* props,
-                  double dim[3]) {
+                  double dim[3], int nr_threads) {
 
 #ifdef HAVE_FFTW
 
-  if (dim[0] != dim[1] || dim[0] != dim[1])
+  if (dim[0] != dim[1] || dim[0] != dim[2])
     error("Doing mesh-gravity on a non-cubic domain");
 
   const int N = props->mesh_size;
   const double box_size = dim[0];
 
+  mesh->nr_threads = nr_threads;
   mesh->periodic = 1;
   mesh->N = N;
   mesh->dim[0] = dim[0];
@@ -487,6 +621,17 @@ void pm_mesh_init(struct pm_mesh* mesh, const struct gravity_props* props,
   mesh->r_cut_max = mesh->r_s * props->r_cut_max_ratio;
   mesh->r_cut_min = mesh->r_s * props->r_cut_min_ratio;
 
+  if (2. * mesh->r_cut_max > box_size)
+    error("Mesh too small or r_cut_max too big for this box size");
+
+#ifdef HAVE_THREADED_FFTW
+  /* Initialise the thread-parallel FFTW version */
+  if (N >= 64) {
+    fftw_init_threads();
+    fftw_plan_with_nthreads(nr_threads);
+  }
+#endif
+
   /* Allocate the memory for the combined density and potential array */
   mesh->potential = (double*)fftw_malloc(sizeof(double) * N * N * N);
   if (mesh->potential == NULL)
@@ -525,6 +670,10 @@ void pm_mesh_init_no_mesh(struct pm_mesh* mesh, double dim[3]) {
  */
 void pm_mesh_clean(struct pm_mesh* mesh) {
 
+#ifdef HAVE_THREADED_FFTW
+  fftw_cleanup_threads();
+#endif
+
   if (mesh->potential) free(mesh->potential);
   mesh->potential = 0;
 }
@@ -551,15 +700,26 @@ void pm_mesh_struct_restore(struct pm_mesh* mesh, FILE* stream) {
 
   restart_read_blocks((void*)mesh, sizeof(struct pm_mesh), 1, stream, NULL,
                       "gravity props");
+
+  if (mesh->periodic) {
+
 #ifdef HAVE_FFTW
-  const int N = mesh->N;
+    const int N = mesh->N;
 
-  /* Allocate the memory for the combined density and potential array */
-  mesh->potential = (double*)fftw_malloc(sizeof(double) * N * N * N);
-  if (mesh->potential == NULL)
-    error("Error allocating memory for the long-range gravity mesh.");
+#ifdef HAVE_THREADED_FFTW
+    /* Initialise the thread-parallel FFTW version */
+    if (N >= 64) {
+      fftw_init_threads();
+      fftw_plan_with_nthreads(mesh->nr_threads);
+    }
+#endif
 
+    /* Allocate the memory for the combined density and potential array */
+    mesh->potential = (double*)fftw_malloc(sizeof(double) * N * N * N);
+    if (mesh->potential == NULL)
+      error("Error allocating memory for the long-range gravity mesh.");
 #else
-  error("No FFTW library found. Cannot compute periodic long-range forces.");
+    error("No FFTW library found. Cannot compute periodic long-range forces.");
 #endif
+  }
 }
diff --git a/src/mesh_gravity.h b/src/mesh_gravity.h
index c512a53ca349816caf4c666c6f504dd4b717bcb7..1b2d997398ee6f3f665340cedb790c241e641cfa 100644
--- a/src/mesh_gravity.h
+++ b/src/mesh_gravity.h
@@ -29,6 +29,7 @@
 /* Forward declarations */
 struct space;
 struct gpart;
+struct threadpool;
 
 /**
  * @brief Data structure for the long-range periodic forces using a mesh
@@ -38,6 +39,9 @@ struct pm_mesh {
   /*! Is the calculation using periodic BCs? */
   int periodic;
 
+  /*! The number of threads used by the FFTW library */
+  int nr_threads;
+
   /*! Side-length of the mesh */
   int N;
 
@@ -64,10 +68,10 @@ struct pm_mesh {
 };
 
 void pm_mesh_init(struct pm_mesh *mesh, const struct gravity_props *props,
-                  double dim[3]);
+                  double dim[3], int nr_threads);
 void pm_mesh_init_no_mesh(struct pm_mesh *mesh, double dim[3]);
 void pm_mesh_compute_potential(struct pm_mesh *mesh, const struct space *s,
-                               int verbose);
+                               struct threadpool *tp, int verbose);
 void pm_mesh_interpolate_forces(const struct pm_mesh *mesh,
                                 const struct engine *e, struct gpart *gparts,
                                 int gcount);
diff --git a/src/minmax.h b/src/minmax.h
index 90dd87968a94d9601a87fd3b826000c166a98966..e4d7c8788ea1e43d1c296a212193049a94347949 100644
--- a/src/minmax.h
+++ b/src/minmax.h
@@ -71,4 +71,36 @@
     max(_temp, _z);                          \
   })
 
+/**
+ * @brief Minimum of four numbers
+ *
+ * This macro evaluates its arguments exactly once.
+ */
+#define min4(x, y, z, w)                      \
+  ({                                          \
+    const __typeof__(x) _x = (x);             \
+    const __typeof__(y) _y = (y);             \
+    const __typeof__(z) _z = (z);             \
+    const __typeof__(w) _w = (w);             \
+    const __typeof__(x) _temp1 = min(_x, _y); \
+    const __typeof__(x) _temp2 = min(_z, _w); \
+    min(_temp1, _temp2);                      \
+  })
+
+/**
+ * @brief Maximum of four numbers
+ *
+ * This macro evaluates its arguments exactly once.
+ */
+#define max4(x, y, z, w)                      \
+  ({                                          \
+    const __typeof__(x) _x = (x);             \
+    const __typeof__(y) _y = (y);             \
+    const __typeof__(z) _z = (z);             \
+    const __typeof__(w) _w = (w);             \
+    const __typeof__(x) _temp1 = max(_x, _y); \
+    const __typeof__(x) _temp2 = max(_z, _w); \
+    max(_temp1, _temp2);                      \
+  })
+
 #endif /* SWIFT_MINMAX_H */
diff --git a/src/multipole.c b/src/multipole.c
index bd5c6d6546fa0546108dcd53d7fe4060293c37a7..a77e6fce297802fb4118b7ac3d4c6a9bf4ecfd22 100644
--- a/src/multipole.c
+++ b/src/multipole.c
@@ -20,3 +20,70 @@
 
 /* Config parameters. */
 #include "../config.h"
+
+/* This object's header. */
+#include "multipole.h"
+
+/* MPI headers. */
+#ifdef WITH_MPI
+#include <mpi.h>
+#endif
+
+#ifdef WITH_MPI
+
+/* MPI data type for the multipole transfer and reduction */
+MPI_Datatype multipole_mpi_type;
+MPI_Op multipole_mpi_reduce_op;
+
+/**
+ * @brief Apply a bit-by-bit XOR operation on #gravity_tensors (i.e. does
+ * a^=b).
+ *
+ * @param a The #gravity_tensors to add to.
+ * @param b The #gravity_tensors to add.
+ */
+void gravity_binary_xor(struct gravity_tensors *a,
+                        const struct gravity_tensors *b) {
+
+  char *aa = (char *)a;
+  const char *bb = (const char *)b;
+
+  for (size_t i = 0; i < sizeof(struct gravity_tensors); ++i) {
+    aa[i] ^= bb[i];
+  }
+}
+
+/**
+ * @brief MPI reduction function for the #gravity_tensors.
+ *
+ * @param invec Array of #gravity_tensors to read.
+ * @param inoutvec Array of #gravity_tensors to read and do the reduction into.
+ * @param len The length of the array.
+ * @param datatype The MPI type this function acts upon (unused).
+ */
+void gravity_tensors_mpi_reduce(void *invec, void *inoutvec, int *len,
+                                MPI_Datatype *datatype) {
+
+  for (int i = 0; i < *len; ++i) {
+    gravity_binary_xor(&((struct gravity_tensors *)inoutvec)[i],
+                       &((const struct gravity_tensors *)invec)[i]);
+  }
+}
+
+void multipole_create_mpi_types(void) {
+
+  /* Create the datatype for multipoles */
+  /* We just consider each structure to be a byte field disregarding their */
+  /* detailed content */
+  if (MPI_Type_contiguous(
+          sizeof(struct gravity_tensors) / sizeof(unsigned char), MPI_BYTE,
+          &multipole_mpi_type) != MPI_SUCCESS ||
+      MPI_Type_commit(&multipole_mpi_type) != MPI_SUCCESS) {
+    error("Failed to create MPI type for multipole.");
+  }
+
+  /* And the reduction operator */
+  MPI_Op_create(gravity_tensors_mpi_reduce, 1, &multipole_mpi_reduce_op);
+}
+
+#endif
diff --git a/src/multipole.h b/src/multipole.h
index c05aa36890313ea22f725ee272746bdf63f597ea..e867dfd4e2cc5c9fcd06d7d95dcf76a97689c2b3 100644
--- a/src/multipole.h
+++ b/src/multipole.h
@@ -186,12 +186,12 @@ struct gravity_tensors {
     /*! The actual content */
     struct {
 
-      /*! Multipole mass */
-      struct multipole m_pole;
-
       /*! Field tensor for the potential */
       struct grav_tensor pot;
 
+      /*! Multipole mass */
+      struct multipole m_pole;
+
       /*! Centre of mass of the matter dsitribution */
       double CoM[3];
 
@@ -207,6 +207,13 @@ struct gravity_tensors {
   };
 } SWIFT_STRUCT_ALIGN;
 
+#ifdef WITH_MPI
+/* MPI datatypes for transfers */
+extern MPI_Datatype multipole_mpi_type;
+extern MPI_Op multipole_mpi_reduce_op;
+void multipole_create_mpi_types(void);
+#endif
+
 /**
  * @brief Reset the data of a #multipole.
  *
@@ -293,8 +300,8 @@ INLINE static void gravity_field_tensors_init(struct grav_tensor *l,
  * @param la The gravity tensors to add to.
  * @param lb The gravity tensors to add.
  */
-INLINE static void gravity_field_tensors_add(struct grav_tensor *la,
-                                             const struct grav_tensor *lb) {
+INLINE static void gravity_field_tensors_add(
+    struct grav_tensor *restrict la, const struct grav_tensor *restrict lb) {
 #ifdef SWIFT_DEBUG_CHECKS
   if (lb->num_interacted == 0) error("Adding tensors that did not interact");
   la->num_interacted += lb->num_interacted;
@@ -502,8 +509,8 @@ INLINE static void gravity_multipole_print(const struct multipole *m) {
  * @param ma The multipole to add to.
  * @param mb The multipole to add.
  */
-INLINE static void gravity_multipole_add(struct multipole *ma,
-                                         const struct multipole *mb) {
+INLINE static void gravity_multipole_add(struct multipole *restrict ma,
+                                         const struct multipole *restrict mb) {
 
   /* Add 0th order term */
   ma->M_000 += mb->M_000;
@@ -1028,6 +1035,11 @@ INLINE static void gravity_P2M(struct gravity_tensors *multi,
   for (int k = 0; k < gcount; k++) {
     const double m = gparts[k].mass;
 
+#ifdef SWIFT_DEBUG_CHECKS
+    if (gparts[k].time_bin == time_bin_inhibited)
+      error("Inhibited particle in P2M. Should have been removed earlier.");
+#endif
+
     mass += m;
     com[0] += gparts[k].x[0] * m;
     com[1] += gparts[k].x[1] * m;
@@ -1037,13 +1049,6 @@ INLINE static void gravity_P2M(struct gravity_tensors *multi,
     vel[2] += gparts[k].v_full[2] * m;
   }
 
-#ifdef PLANETARY_SPH
-  /* Prevent FPE from zero mass with the temporary outside-the-box particles */
-  if (mass == 0.f) {
-    mass = FLT_MIN;
-  }
-#endif  // PLANETARY_SPH
-
   /* Final operation on CoM */
   const double imass = 1.0 / mass;
   com[0] *= imass;
@@ -1307,8 +1312,8 @@ INLINE static void gravity_P2M(struct gravity_tensors *multi,
  * @param pos_a The position to which m_b will be shifted.
  * @param pos_b The current postion of the multipole to shift.
  */
-INLINE static void gravity_M2M(struct multipole *m_a,
-                               const struct multipole *m_b,
+INLINE static void gravity_M2M(struct multipole *restrict m_a,
+                               const struct multipole *restrict m_b,
                                const double pos_a[3], const double pos_b[3]) {
 
   /* Shift 0th order term */
@@ -1558,43 +1563,11 @@ INLINE static void gravity_M2M(struct multipole *m_a,
  *
  * @param l_b The field tensor to compute.
  * @param m_a The multipole creating the field.
- * @param pos_b The position of the field tensor.
- * @param pos_a The position of the multipole.
- * @param props The #gravity_props of this calculation.
- * @param periodic Is the calculation periodic ?
- * @param dim The size of the simulation box.
- * @param rs_inv The inverse of the gravity mesh-smoothing scale.
+ * @param pot The derivatives of the potential.
  */
-INLINE static void gravity_M2L(struct grav_tensor *l_b,
-                               const struct multipole *m_a,
-                               const double pos_b[3], const double pos_a[3],
-                               const struct gravity_props *props, int periodic,
-                               const double dim[3], float rs_inv) {
-
-  /* Recover some constants */
-  const float eps = props->epsilon_cur;
-  const float eps_inv = props->epsilon_cur_inv;
-
-  /* Compute distance vector */
-  float dx = (float)(pos_b[0] - pos_a[0]);
-  float dy = (float)(pos_b[1] - pos_a[1]);
-  float dz = (float)(pos_b[2] - pos_a[2]);
-
-  /* Apply BC */
-  if (periodic) {
-    dx = nearest(dx, dim[0]);
-    dy = nearest(dy, dim[1]);
-    dz = nearest(dz, dim[2]);
-  }
-
-  /* Compute distance */
-  const float r2 = dx * dx + dy * dy + dz * dz;
-  const float r_inv = 1. / sqrtf(r2);
-
-  /* Compute all derivatives */
-  struct potential_derivatives_M2L pot;
-  compute_potential_derivatives_M2L(dx, dy, dz, r2, r_inv, eps, eps_inv,
-                                    periodic, rs_inv, &pot);
+INLINE static void gravity_M2L_apply(
+    struct grav_tensor *restrict l_b, const struct multipole *restrict m_a,
+    const struct potential_derivatives_M2L *pot) {
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Count interactions */
@@ -1604,330 +1577,368 @@ INLINE static void gravity_M2L(struct grav_tensor *l_b,
   /* Record that this tensor has received contributions */
   l_b->interacted = 1;
 
+  const float M_000 = m_a->M_000;
+  const float D_000 = pot->D_000;
+
   /*  0th order term */
-  l_b->F_000 += m_a->M_000 * pot.D_000;
+  l_b->F_000 += M_000 * D_000;
 
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 0
+
+  /* The dipole term is zero when using the CoM */
+  /* The compiler will optimize out the terms in the equations */
+  /* below. We keep them written to maintain the logical structure. */
+  const float M_100 = 0.f;
+  const float M_010 = 0.f;
+  const float M_001 = 0.f;
+
+  const float D_100 = pot->D_100;
+  const float D_010 = pot->D_010;
+  const float D_001 = pot->D_001;
+
   /*  1st order multipole term (addition to rank 0)*/
-  l_b->F_000 +=
-      m_a->M_100 * pot.D_100 + m_a->M_010 * pot.D_010 + m_a->M_001 * pot.D_001;
+  l_b->F_000 += M_100 * D_100 + M_010 * D_010 + M_001 * D_001;
 
   /*  1st order multipole term (addition to rank 1)*/
-  l_b->F_100 += m_a->M_000 * pot.D_100;
-  l_b->F_010 += m_a->M_000 * pot.D_010;
-  l_b->F_001 += m_a->M_000 * pot.D_001;
+  l_b->F_100 += M_000 * D_100;
+  l_b->F_010 += M_000 * D_010;
+  l_b->F_001 += M_000 * D_001;
 #endif
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 1
 
+  const float M_200 = m_a->M_200;
+  const float M_020 = m_a->M_020;
+  const float M_002 = m_a->M_002;
+  const float M_110 = m_a->M_110;
+  const float M_101 = m_a->M_101;
+  const float M_011 = m_a->M_011;
+
+  const float D_200 = pot->D_200;
+  const float D_020 = pot->D_020;
+  const float D_002 = pot->D_002;
+  const float D_110 = pot->D_110;
+  const float D_101 = pot->D_101;
+  const float D_011 = pot->D_011;
+
   /*  2nd order multipole term (addition to rank 0)*/
-  l_b->F_000 +=
-      m_a->M_200 * pot.D_200 + m_a->M_020 * pot.D_020 + m_a->M_002 * pot.D_002;
-  l_b->F_000 +=
-      m_a->M_110 * pot.D_110 + m_a->M_101 * pot.D_101 + m_a->M_011 * pot.D_011;
+  l_b->F_000 += M_200 * D_200 + M_020 * D_020 + M_002 * D_002;
+  l_b->F_000 += M_110 * D_110 + M_101 * D_101 + M_011 * D_011;
 
   /*  2nd order multipole term (addition to rank 1)*/
-  l_b->F_100 +=
-      m_a->M_100 * pot.D_200 + m_a->M_010 * pot.D_110 + m_a->M_001 * pot.D_101;
-  l_b->F_010 +=
-      m_a->M_100 * pot.D_110 + m_a->M_010 * pot.D_020 + m_a->M_001 * pot.D_011;
-  l_b->F_001 +=
-      m_a->M_100 * pot.D_101 + m_a->M_010 * pot.D_011 + m_a->M_001 * pot.D_002;
+  l_b->F_100 += M_100 * D_200 + M_010 * D_110 + M_001 * D_101;
+  l_b->F_010 += M_100 * D_110 + M_010 * D_020 + M_001 * D_011;
+  l_b->F_001 += M_100 * D_101 + M_010 * D_011 + M_001 * D_002;
 
   /*  2nd order multipole term (addition to rank 2)*/
-  l_b->F_200 += m_a->M_000 * pot.D_200;
-  l_b->F_020 += m_a->M_000 * pot.D_020;
-  l_b->F_002 += m_a->M_000 * pot.D_002;
-  l_b->F_110 += m_a->M_000 * pot.D_110;
-  l_b->F_101 += m_a->M_000 * pot.D_101;
-  l_b->F_011 += m_a->M_000 * pot.D_011;
+  l_b->F_200 += M_000 * D_200;
+  l_b->F_020 += M_000 * D_020;
+  l_b->F_002 += M_000 * D_002;
+  l_b->F_110 += M_000 * D_110;
+  l_b->F_101 += M_000 * D_101;
+  l_b->F_011 += M_000 * D_011;
 #endif
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 2
 
+  const float M_300 = m_a->M_300;
+  const float M_030 = m_a->M_030;
+  const float M_003 = m_a->M_003;
+  const float M_210 = m_a->M_210;
+  const float M_201 = m_a->M_201;
+  const float M_021 = m_a->M_021;
+  const float M_120 = m_a->M_120;
+  const float M_012 = m_a->M_012;
+  const float M_102 = m_a->M_102;
+  const float M_111 = m_a->M_111;
+
+  const float D_300 = pot->D_300;
+  const float D_030 = pot->D_030;
+  const float D_003 = pot->D_003;
+  const float D_210 = pot->D_210;
+  const float D_201 = pot->D_201;
+  const float D_021 = pot->D_021;
+  const float D_120 = pot->D_120;
+  const float D_012 = pot->D_012;
+  const float D_102 = pot->D_102;
+  const float D_111 = pot->D_111;
+
   /*  3rd order multipole term (addition to rank 0)*/
-  l_b->F_000 +=
-      m_a->M_300 * pot.D_300 + m_a->M_030 * pot.D_030 + m_a->M_003 * pot.D_003;
-  l_b->F_000 +=
-      m_a->M_210 * pot.D_210 + m_a->M_201 * pot.D_201 + m_a->M_120 * pot.D_120;
-  l_b->F_000 +=
-      m_a->M_021 * pot.D_021 + m_a->M_102 * pot.D_102 + m_a->M_012 * pot.D_012;
-  l_b->F_000 += m_a->M_111 * pot.D_111;
+  l_b->F_000 += M_300 * D_300 + M_030 * D_030 + M_003 * D_003;
+  l_b->F_000 += M_210 * D_210 + M_201 * D_201 + M_120 * D_120;
+  l_b->F_000 += M_021 * D_021 + M_102 * D_102 + M_012 * D_012;
+  l_b->F_000 += M_111 * D_111;
 
   /*  3rd order multipole term (addition to rank 1)*/
-  l_b->F_100 +=
-      m_a->M_200 * pot.D_300 + m_a->M_020 * pot.D_120 + m_a->M_002 * pot.D_102;
-  l_b->F_100 +=
-      m_a->M_110 * pot.D_210 + m_a->M_101 * pot.D_201 + m_a->M_011 * pot.D_111;
-  l_b->F_010 +=
-      m_a->M_200 * pot.D_210 + m_a->M_020 * pot.D_030 + m_a->M_002 * pot.D_012;
-  l_b->F_010 +=
-      m_a->M_110 * pot.D_120 + m_a->M_101 * pot.D_111 + m_a->M_011 * pot.D_021;
-  l_b->F_001 +=
-      m_a->M_200 * pot.D_201 + m_a->M_020 * pot.D_021 + m_a->M_002 * pot.D_003;
-  l_b->F_001 +=
-      m_a->M_110 * pot.D_111 + m_a->M_101 * pot.D_102 + m_a->M_011 * pot.D_012;
+  l_b->F_100 += M_200 * D_300 + M_020 * D_120 + M_002 * D_102;
+  l_b->F_100 += M_110 * D_210 + M_101 * D_201 + M_011 * D_111;
+  l_b->F_010 += M_200 * D_210 + M_020 * D_030 + M_002 * D_012;
+  l_b->F_010 += M_110 * D_120 + M_101 * D_111 + M_011 * D_021;
+  l_b->F_001 += M_200 * D_201 + M_020 * D_021 + M_002 * D_003;
+  l_b->F_001 += M_110 * D_111 + M_101 * D_102 + M_011 * D_012;
 
   /*  3rd order multipole term (addition to rank 2)*/
-  l_b->F_200 +=
-      m_a->M_100 * pot.D_300 + m_a->M_010 * pot.D_210 + m_a->M_001 * pot.D_201;
-  l_b->F_020 +=
-      m_a->M_100 * pot.D_120 + m_a->M_010 * pot.D_030 + m_a->M_001 * pot.D_021;
-  l_b->F_002 +=
-      m_a->M_100 * pot.D_102 + m_a->M_010 * pot.D_012 + m_a->M_001 * pot.D_003;
-  l_b->F_110 +=
-      m_a->M_100 * pot.D_210 + m_a->M_010 * pot.D_120 + m_a->M_001 * pot.D_111;
-  l_b->F_101 +=
-      m_a->M_100 * pot.D_201 + m_a->M_010 * pot.D_111 + m_a->M_001 * pot.D_102;
-  l_b->F_011 +=
-      m_a->M_100 * pot.D_111 + m_a->M_010 * pot.D_021 + m_a->M_001 * pot.D_012;
+  l_b->F_200 += M_100 * D_300 + M_010 * D_210 + M_001 * D_201;
+  l_b->F_020 += M_100 * D_120 + M_010 * D_030 + M_001 * D_021;
+  l_b->F_002 += M_100 * D_102 + M_010 * D_012 + M_001 * D_003;
+  l_b->F_110 += M_100 * D_210 + M_010 * D_120 + M_001 * D_111;
+  l_b->F_101 += M_100 * D_201 + M_010 * D_111 + M_001 * D_102;
+  l_b->F_011 += M_100 * D_111 + M_010 * D_021 + M_001 * D_012;
 
   /*  3rd order multipole term (addition to rank 3)*/
-  l_b->F_300 += m_a->M_000 * pot.D_300;
-  l_b->F_030 += m_a->M_000 * pot.D_030;
-  l_b->F_003 += m_a->M_000 * pot.D_003;
-  l_b->F_210 += m_a->M_000 * pot.D_210;
-  l_b->F_201 += m_a->M_000 * pot.D_201;
-  l_b->F_120 += m_a->M_000 * pot.D_120;
-  l_b->F_021 += m_a->M_000 * pot.D_021;
-  l_b->F_102 += m_a->M_000 * pot.D_102;
-  l_b->F_012 += m_a->M_000 * pot.D_012;
-  l_b->F_111 += m_a->M_000 * pot.D_111;
+  l_b->F_300 += M_000 * D_300;
+  l_b->F_030 += M_000 * D_030;
+  l_b->F_003 += M_000 * D_003;
+  l_b->F_210 += M_000 * D_210;
+  l_b->F_201 += M_000 * D_201;
+  l_b->F_120 += M_000 * D_120;
+  l_b->F_021 += M_000 * D_021;
+  l_b->F_102 += M_000 * D_102;
+  l_b->F_012 += M_000 * D_012;
+  l_b->F_111 += M_000 * D_111;
 #endif
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 3
+
+  const float M_400 = m_a->M_400;
+  const float M_040 = m_a->M_040;
+  const float M_004 = m_a->M_004;
+  const float M_310 = m_a->M_310;
+  const float M_301 = m_a->M_301;
+  const float M_031 = m_a->M_031;
+  const float M_130 = m_a->M_130;
+  const float M_013 = m_a->M_013;
+  const float M_103 = m_a->M_103;
+  const float M_220 = m_a->M_220;
+  const float M_202 = m_a->M_202;
+  const float M_022 = m_a->M_022;
+  const float M_211 = m_a->M_211;
+  const float M_121 = m_a->M_121;
+  const float M_112 = m_a->M_112;
+
+  const float D_400 = pot->D_400;
+  const float D_040 = pot->D_040;
+  const float D_004 = pot->D_004;
+  const float D_310 = pot->D_310;
+  const float D_301 = pot->D_301;
+  const float D_031 = pot->D_031;
+  const float D_130 = pot->D_130;
+  const float D_013 = pot->D_013;
+  const float D_103 = pot->D_103;
+  const float D_220 = pot->D_220;
+  const float D_202 = pot->D_202;
+  const float D_022 = pot->D_022;
+  const float D_211 = pot->D_211;
+  const float D_121 = pot->D_121;
+  const float D_112 = pot->D_112;
+
   /* Compute 4th order field tensor terms (addition to rank 0) */
-  l_b->F_000 +=
-      m_a->M_004 * pot.D_004 + m_a->M_013 * pot.D_013 + m_a->M_022 * pot.D_022 +
-      m_a->M_031 * pot.D_031 + m_a->M_040 * pot.D_040 + m_a->M_103 * pot.D_103 +
-      m_a->M_112 * pot.D_112 + m_a->M_121 * pot.D_121 + m_a->M_130 * pot.D_130 +
-      m_a->M_202 * pot.D_202 + m_a->M_211 * pot.D_211 + m_a->M_220 * pot.D_220 +
-      m_a->M_301 * pot.D_301 + m_a->M_310 * pot.D_310 + m_a->M_400 * pot.D_400;
+  l_b->F_000 += M_004 * D_004 + M_013 * D_013 + M_022 * D_022 + M_031 * D_031 +
+                M_040 * D_040 + M_103 * D_103 + M_112 * D_112 + M_121 * D_121 +
+                M_130 * D_130 + M_202 * D_202 + M_211 * D_211 + M_220 * D_220 +
+                M_301 * D_301 + M_310 * D_310 + M_400 * D_400;
 
   /* Compute 4th order field tensor terms (addition to rank 1) */
-  l_b->F_001 += m_a->M_003 * pot.D_004 + m_a->M_012 * pot.D_013 +
-                m_a->M_021 * pot.D_022 + m_a->M_030 * pot.D_031 +
-                m_a->M_102 * pot.D_103 + m_a->M_111 * pot.D_112 +
-                m_a->M_120 * pot.D_121 + m_a->M_201 * pot.D_202 +
-                m_a->M_210 * pot.D_211 + m_a->M_300 * pot.D_301;
-  l_b->F_010 += m_a->M_003 * pot.D_013 + m_a->M_012 * pot.D_022 +
-                m_a->M_021 * pot.D_031 + m_a->M_030 * pot.D_040 +
-                m_a->M_102 * pot.D_112 + m_a->M_111 * pot.D_121 +
-                m_a->M_120 * pot.D_130 + m_a->M_201 * pot.D_211 +
-                m_a->M_210 * pot.D_220 + m_a->M_300 * pot.D_310;
-  l_b->F_100 += m_a->M_003 * pot.D_103 + m_a->M_012 * pot.D_112 +
-                m_a->M_021 * pot.D_121 + m_a->M_030 * pot.D_130 +
-                m_a->M_102 * pot.D_202 + m_a->M_111 * pot.D_211 +
-                m_a->M_120 * pot.D_220 + m_a->M_201 * pot.D_301 +
-                m_a->M_210 * pot.D_310 + m_a->M_300 * pot.D_400;
+  l_b->F_001 += M_003 * D_004 + M_012 * D_013 + M_021 * D_022 + M_030 * D_031 +
+                M_102 * D_103 + M_111 * D_112 + M_120 * D_121 + M_201 * D_202 +
+                M_210 * D_211 + M_300 * D_301;
+  l_b->F_010 += M_003 * D_013 + M_012 * D_022 + M_021 * D_031 + M_030 * D_040 +
+                M_102 * D_112 + M_111 * D_121 + M_120 * D_130 + M_201 * D_211 +
+                M_210 * D_220 + M_300 * D_310;
+  l_b->F_100 += M_003 * D_103 + M_012 * D_112 + M_021 * D_121 + M_030 * D_130 +
+                M_102 * D_202 + M_111 * D_211 + M_120 * D_220 + M_201 * D_301 +
+                M_210 * D_310 + M_300 * D_400;
 
   /* Compute 4th order field tensor terms (addition to rank 2) */
-  l_b->F_002 += m_a->M_002 * pot.D_004 + m_a->M_011 * pot.D_013 +
-                m_a->M_020 * pot.D_022 + m_a->M_101 * pot.D_103 +
-                m_a->M_110 * pot.D_112 + m_a->M_200 * pot.D_202;
-  l_b->F_011 += m_a->M_002 * pot.D_013 + m_a->M_011 * pot.D_022 +
-                m_a->M_020 * pot.D_031 + m_a->M_101 * pot.D_112 +
-                m_a->M_110 * pot.D_121 + m_a->M_200 * pot.D_211;
-  l_b->F_020 += m_a->M_002 * pot.D_022 + m_a->M_011 * pot.D_031 +
-                m_a->M_020 * pot.D_040 + m_a->M_101 * pot.D_121 +
-                m_a->M_110 * pot.D_130 + m_a->M_200 * pot.D_220;
-  l_b->F_101 += m_a->M_002 * pot.D_103 + m_a->M_011 * pot.D_112 +
-                m_a->M_020 * pot.D_121 + m_a->M_101 * pot.D_202 +
-                m_a->M_110 * pot.D_211 + m_a->M_200 * pot.D_301;
-  l_b->F_110 += m_a->M_002 * pot.D_112 + m_a->M_011 * pot.D_121 +
-                m_a->M_020 * pot.D_130 + m_a->M_101 * pot.D_211 +
-                m_a->M_110 * pot.D_220 + m_a->M_200 * pot.D_310;
-  l_b->F_200 += m_a->M_002 * pot.D_202 + m_a->M_011 * pot.D_211 +
-                m_a->M_020 * pot.D_220 + m_a->M_101 * pot.D_301 +
-                m_a->M_110 * pot.D_310 + m_a->M_200 * pot.D_400;
+  l_b->F_002 += M_002 * D_004 + M_011 * D_013 + M_020 * D_022 + M_101 * D_103 +
+                M_110 * D_112 + M_200 * D_202;
+  l_b->F_011 += M_002 * D_013 + M_011 * D_022 + M_020 * D_031 + M_101 * D_112 +
+                M_110 * D_121 + M_200 * D_211;
+  l_b->F_020 += M_002 * D_022 + M_011 * D_031 + M_020 * D_040 + M_101 * D_121 +
+                M_110 * D_130 + M_200 * D_220;
+  l_b->F_101 += M_002 * D_103 + M_011 * D_112 + M_020 * D_121 + M_101 * D_202 +
+                M_110 * D_211 + M_200 * D_301;
+  l_b->F_110 += M_002 * D_112 + M_011 * D_121 + M_020 * D_130 + M_101 * D_211 +
+                M_110 * D_220 + M_200 * D_310;
+  l_b->F_200 += M_002 * D_202 + M_011 * D_211 + M_020 * D_220 + M_101 * D_301 +
+                M_110 * D_310 + M_200 * D_400;
 
   /* Compute 4th order field tensor terms (addition to rank 3) */
-  l_b->F_003 +=
-      m_a->M_001 * pot.D_004 + m_a->M_010 * pot.D_013 + m_a->M_100 * pot.D_103;
-  l_b->F_012 +=
-      m_a->M_001 * pot.D_013 + m_a->M_010 * pot.D_022 + m_a->M_100 * pot.D_112;
-  l_b->F_021 +=
-      m_a->M_001 * pot.D_022 + m_a->M_010 * pot.D_031 + m_a->M_100 * pot.D_121;
-  l_b->F_030 +=
-      m_a->M_001 * pot.D_031 + m_a->M_010 * pot.D_040 + m_a->M_100 * pot.D_130;
-  l_b->F_102 +=
-      m_a->M_001 * pot.D_103 + m_a->M_010 * pot.D_112 + m_a->M_100 * pot.D_202;
-  l_b->F_111 +=
-      m_a->M_001 * pot.D_112 + m_a->M_010 * pot.D_121 + m_a->M_100 * pot.D_211;
-  l_b->F_120 +=
-      m_a->M_001 * pot.D_121 + m_a->M_010 * pot.D_130 + m_a->M_100 * pot.D_220;
-  l_b->F_201 +=
-      m_a->M_001 * pot.D_202 + m_a->M_010 * pot.D_211 + m_a->M_100 * pot.D_301;
-  l_b->F_210 +=
-      m_a->M_001 * pot.D_211 + m_a->M_010 * pot.D_220 + m_a->M_100 * pot.D_310;
-  l_b->F_300 +=
-      m_a->M_001 * pot.D_301 + m_a->M_010 * pot.D_310 + m_a->M_100 * pot.D_400;
+  l_b->F_003 += M_001 * D_004 + M_010 * D_013 + M_100 * D_103;
+  l_b->F_012 += M_001 * D_013 + M_010 * D_022 + M_100 * D_112;
+  l_b->F_021 += M_001 * D_022 + M_010 * D_031 + M_100 * D_121;
+  l_b->F_030 += M_001 * D_031 + M_010 * D_040 + M_100 * D_130;
+  l_b->F_102 += M_001 * D_103 + M_010 * D_112 + M_100 * D_202;
+  l_b->F_111 += M_001 * D_112 + M_010 * D_121 + M_100 * D_211;
+  l_b->F_120 += M_001 * D_121 + M_010 * D_130 + M_100 * D_220;
+  l_b->F_201 += M_001 * D_202 + M_010 * D_211 + M_100 * D_301;
+  l_b->F_210 += M_001 * D_211 + M_010 * D_220 + M_100 * D_310;
+  l_b->F_300 += M_001 * D_301 + M_010 * D_310 + M_100 * D_400;
 
   /* Compute 4th order field tensor terms (addition to rank 4) */
-  l_b->F_004 += m_a->M_000 * pot.D_004;
-  l_b->F_013 += m_a->M_000 * pot.D_013;
-  l_b->F_022 += m_a->M_000 * pot.D_022;
-  l_b->F_031 += m_a->M_000 * pot.D_031;
-  l_b->F_040 += m_a->M_000 * pot.D_040;
-  l_b->F_103 += m_a->M_000 * pot.D_103;
-  l_b->F_112 += m_a->M_000 * pot.D_112;
-  l_b->F_121 += m_a->M_000 * pot.D_121;
-  l_b->F_130 += m_a->M_000 * pot.D_130;
-  l_b->F_202 += m_a->M_000 * pot.D_202;
-  l_b->F_211 += m_a->M_000 * pot.D_211;
-  l_b->F_220 += m_a->M_000 * pot.D_220;
-  l_b->F_301 += m_a->M_000 * pot.D_301;
-  l_b->F_310 += m_a->M_000 * pot.D_310;
-  l_b->F_400 += m_a->M_000 * pot.D_400;
+  l_b->F_004 += M_000 * D_004;
+  l_b->F_013 += M_000 * D_013;
+  l_b->F_022 += M_000 * D_022;
+  l_b->F_031 += M_000 * D_031;
+  l_b->F_040 += M_000 * D_040;
+  l_b->F_103 += M_000 * D_103;
+  l_b->F_112 += M_000 * D_112;
+  l_b->F_121 += M_000 * D_121;
+  l_b->F_130 += M_000 * D_130;
+  l_b->F_202 += M_000 * D_202;
+  l_b->F_211 += M_000 * D_211;
+  l_b->F_220 += M_000 * D_220;
+  l_b->F_301 += M_000 * D_301;
+  l_b->F_310 += M_000 * D_310;
+  l_b->F_400 += M_000 * D_400;
 
 #endif
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 4
 
+  const float M_500 = m_a->M_500;
+  const float M_050 = m_a->M_050;
+  const float M_005 = m_a->M_005;
+  const float M_410 = m_a->M_410;
+  const float M_401 = m_a->M_401;
+  const float M_041 = m_a->M_041;
+  const float M_140 = m_a->M_140;
+  const float M_014 = m_a->M_014;
+  const float M_104 = m_a->M_104;
+  const float M_320 = m_a->M_320;
+  const float M_302 = m_a->M_302;
+  const float M_230 = m_a->M_230;
+  const float M_032 = m_a->M_032;
+  const float M_203 = m_a->M_203;
+  const float M_023 = m_a->M_023;
+  const float M_122 = m_a->M_122;
+  const float M_212 = m_a->M_212;
+  const float M_221 = m_a->M_221;
+  const float M_311 = m_a->M_311;
+  const float M_131 = m_a->M_131;
+  const float M_113 = m_a->M_113;
+
+  const float D_500 = pot->D_500;
+  const float D_050 = pot->D_050;
+  const float D_005 = pot->D_005;
+  const float D_410 = pot->D_410;
+  const float D_401 = pot->D_401;
+  const float D_041 = pot->D_041;
+  const float D_140 = pot->D_140;
+  const float D_014 = pot->D_014;
+  const float D_104 = pot->D_104;
+  const float D_320 = pot->D_320;
+  const float D_302 = pot->D_302;
+  const float D_230 = pot->D_230;
+  const float D_032 = pot->D_032;
+  const float D_203 = pot->D_203;
+  const float D_023 = pot->D_023;
+  const float D_122 = pot->D_122;
+  const float D_212 = pot->D_212;
+  const float D_221 = pot->D_221;
+  const float D_311 = pot->D_311;
+  const float D_131 = pot->D_131;
+  const float D_113 = pot->D_113;
+
   /* Compute 5th order field tensor terms (addition to rank 0) */
-  l_b->F_000 +=
-      m_a->M_005 * pot.D_005 + m_a->M_014 * pot.D_014 + m_a->M_023 * pot.D_023 +
-      m_a->M_032 * pot.D_032 + m_a->M_041 * pot.D_041 + m_a->M_050 * pot.D_050 +
-      m_a->M_104 * pot.D_104 + m_a->M_113 * pot.D_113 + m_a->M_122 * pot.D_122 +
-      m_a->M_131 * pot.D_131 + m_a->M_140 * pot.D_140 + m_a->M_203 * pot.D_203 +
-      m_a->M_212 * pot.D_212 + m_a->M_221 * pot.D_221 + m_a->M_230 * pot.D_230 +
-      m_a->M_302 * pot.D_302 + m_a->M_311 * pot.D_311 + m_a->M_320 * pot.D_320 +
-      m_a->M_401 * pot.D_401 + m_a->M_410 * pot.D_410 + m_a->M_500 * pot.D_500;
+  l_b->F_000 += M_005 * D_005 + M_014 * D_014 + M_023 * D_023 + M_032 * D_032 +
+                M_041 * D_041 + M_050 * D_050 + M_104 * D_104 + M_113 * D_113 +
+                M_122 * D_122 + M_131 * D_131 + M_140 * D_140 + M_203 * D_203 +
+                M_212 * D_212 + M_221 * D_221 + M_230 * D_230 + M_302 * D_302 +
+                M_311 * D_311 + M_320 * D_320 + M_401 * D_401 + M_410 * D_410 +
+                M_500 * D_500;
 
   /* Compute 5th order field tensor terms (addition to rank 1) */
-  l_b->F_001 +=
-      m_a->M_004 * pot.D_005 + m_a->M_013 * pot.D_014 + m_a->M_022 * pot.D_023 +
-      m_a->M_031 * pot.D_032 + m_a->M_040 * pot.D_041 + m_a->M_103 * pot.D_104 +
-      m_a->M_112 * pot.D_113 + m_a->M_121 * pot.D_122 + m_a->M_130 * pot.D_131 +
-      m_a->M_202 * pot.D_203 + m_a->M_211 * pot.D_212 + m_a->M_220 * pot.D_221 +
-      m_a->M_301 * pot.D_302 + m_a->M_310 * pot.D_311 + m_a->M_400 * pot.D_401;
-  l_b->F_010 +=
-      m_a->M_004 * pot.D_014 + m_a->M_013 * pot.D_023 + m_a->M_022 * pot.D_032 +
-      m_a->M_031 * pot.D_041 + m_a->M_040 * pot.D_050 + m_a->M_103 * pot.D_113 +
-      m_a->M_112 * pot.D_122 + m_a->M_121 * pot.D_131 + m_a->M_130 * pot.D_140 +
-      m_a->M_202 * pot.D_212 + m_a->M_211 * pot.D_221 + m_a->M_220 * pot.D_230 +
-      m_a->M_301 * pot.D_311 + m_a->M_310 * pot.D_320 + m_a->M_400 * pot.D_410;
-  l_b->F_100 +=
-      m_a->M_004 * pot.D_104 + m_a->M_013 * pot.D_113 + m_a->M_022 * pot.D_122 +
-      m_a->M_031 * pot.D_131 + m_a->M_040 * pot.D_140 + m_a->M_103 * pot.D_203 +
-      m_a->M_112 * pot.D_212 + m_a->M_121 * pot.D_221 + m_a->M_130 * pot.D_230 +
-      m_a->M_202 * pot.D_302 + m_a->M_211 * pot.D_311 + m_a->M_220 * pot.D_320 +
-      m_a->M_301 * pot.D_401 + m_a->M_310 * pot.D_410 + m_a->M_400 * pot.D_500;
+  l_b->F_001 += M_004 * D_005 + M_013 * D_014 + M_022 * D_023 + M_031 * D_032 +
+                M_040 * D_041 + M_103 * D_104 + M_112 * D_113 + M_121 * D_122 +
+                M_130 * D_131 + M_202 * D_203 + M_211 * D_212 + M_220 * D_221 +
+                M_301 * D_302 + M_310 * D_311 + M_400 * D_401;
+  l_b->F_010 += M_004 * D_014 + M_013 * D_023 + M_022 * D_032 + M_031 * D_041 +
+                M_040 * D_050 + M_103 * D_113 + M_112 * D_122 + M_121 * D_131 +
+                M_130 * D_140 + M_202 * D_212 + M_211 * D_221 + M_220 * D_230 +
+                M_301 * D_311 + M_310 * D_320 + M_400 * D_410;
+  l_b->F_100 += M_004 * D_104 + M_013 * D_113 + M_022 * D_122 + M_031 * D_131 +
+                M_040 * D_140 + M_103 * D_203 + M_112 * D_212 + M_121 * D_221 +
+                M_130 * D_230 + M_202 * D_302 + M_211 * D_311 + M_220 * D_320 +
+                M_301 * D_401 + M_310 * D_410 + M_400 * D_500;
 
   /* Compute 5th order field tensor terms (addition to rank 2) */
-  l_b->F_002 += m_a->M_003 * pot.D_005 + m_a->M_012 * pot.D_014 +
-                m_a->M_021 * pot.D_023 + m_a->M_030 * pot.D_032 +
-                m_a->M_102 * pot.D_104 + m_a->M_111 * pot.D_113 +
-                m_a->M_120 * pot.D_122 + m_a->M_201 * pot.D_203 +
-                m_a->M_210 * pot.D_212 + m_a->M_300 * pot.D_302;
-  l_b->F_011 += m_a->M_003 * pot.D_014 + m_a->M_012 * pot.D_023 +
-                m_a->M_021 * pot.D_032 + m_a->M_030 * pot.D_041 +
-                m_a->M_102 * pot.D_113 + m_a->M_111 * pot.D_122 +
-                m_a->M_120 * pot.D_131 + m_a->M_201 * pot.D_212 +
-                m_a->M_210 * pot.D_221 + m_a->M_300 * pot.D_311;
-  l_b->F_020 += m_a->M_003 * pot.D_023 + m_a->M_012 * pot.D_032 +
-                m_a->M_021 * pot.D_041 + m_a->M_030 * pot.D_050 +
-                m_a->M_102 * pot.D_122 + m_a->M_111 * pot.D_131 +
-                m_a->M_120 * pot.D_140 + m_a->M_201 * pot.D_221 +
-                m_a->M_210 * pot.D_230 + m_a->M_300 * pot.D_320;
-  l_b->F_101 += m_a->M_003 * pot.D_104 + m_a->M_012 * pot.D_113 +
-                m_a->M_021 * pot.D_122 + m_a->M_030 * pot.D_131 +
-                m_a->M_102 * pot.D_203 + m_a->M_111 * pot.D_212 +
-                m_a->M_120 * pot.D_221 + m_a->M_201 * pot.D_302 +
-                m_a->M_210 * pot.D_311 + m_a->M_300 * pot.D_401;
-  l_b->F_110 += m_a->M_003 * pot.D_113 + m_a->M_012 * pot.D_122 +
-                m_a->M_021 * pot.D_131 + m_a->M_030 * pot.D_140 +
-                m_a->M_102 * pot.D_212 + m_a->M_111 * pot.D_221 +
-                m_a->M_120 * pot.D_230 + m_a->M_201 * pot.D_311 +
-                m_a->M_210 * pot.D_320 + m_a->M_300 * pot.D_410;
-  l_b->F_200 += m_a->M_003 * pot.D_203 + m_a->M_012 * pot.D_212 +
-                m_a->M_021 * pot.D_221 + m_a->M_030 * pot.D_230 +
-                m_a->M_102 * pot.D_302 + m_a->M_111 * pot.D_311 +
-                m_a->M_120 * pot.D_320 + m_a->M_201 * pot.D_401 +
-                m_a->M_210 * pot.D_410 + m_a->M_300 * pot.D_500;
+  l_b->F_002 += M_003 * D_005 + M_012 * D_014 + M_021 * D_023 + M_030 * D_032 +
+                M_102 * D_104 + M_111 * D_113 + M_120 * D_122 + M_201 * D_203 +
+                M_210 * D_212 + M_300 * D_302;
+  l_b->F_011 += M_003 * D_014 + M_012 * D_023 + M_021 * D_032 + M_030 * D_041 +
+                M_102 * D_113 + M_111 * D_122 + M_120 * D_131 + M_201 * D_212 +
+                M_210 * D_221 + M_300 * D_311;
+  l_b->F_020 += M_003 * D_023 + M_012 * D_032 + M_021 * D_041 + M_030 * D_050 +
+                M_102 * D_122 + M_111 * D_131 + M_120 * D_140 + M_201 * D_221 +
+                M_210 * D_230 + M_300 * D_320;
+  l_b->F_101 += M_003 * D_104 + M_012 * D_113 + M_021 * D_122 + M_030 * D_131 +
+                M_102 * D_203 + M_111 * D_212 + M_120 * D_221 + M_201 * D_302 +
+                M_210 * D_311 + M_300 * D_401;
+  l_b->F_110 += M_003 * D_113 + M_012 * D_122 + M_021 * D_131 + M_030 * D_140 +
+                M_102 * D_212 + M_111 * D_221 + M_120 * D_230 + M_201 * D_311 +
+                M_210 * D_320 + M_300 * D_410;
+  l_b->F_200 += M_003 * D_203 + M_012 * D_212 + M_021 * D_221 + M_030 * D_230 +
+                M_102 * D_302 + M_111 * D_311 + M_120 * D_320 + M_201 * D_401 +
+                M_210 * D_410 + M_300 * D_500;
 
   /* Compute 5th order field tensor terms (addition to rank 3) */
-  l_b->F_003 += m_a->M_002 * pot.D_005 + m_a->M_011 * pot.D_014 +
-                m_a->M_020 * pot.D_023 + m_a->M_101 * pot.D_104 +
-                m_a->M_110 * pot.D_113 + m_a->M_200 * pot.D_203;
-  l_b->F_012 += m_a->M_002 * pot.D_014 + m_a->M_011 * pot.D_023 +
-                m_a->M_020 * pot.D_032 + m_a->M_101 * pot.D_113 +
-                m_a->M_110 * pot.D_122 + m_a->M_200 * pot.D_212;
-  l_b->F_021 += m_a->M_002 * pot.D_023 + m_a->M_011 * pot.D_032 +
-                m_a->M_020 * pot.D_041 + m_a->M_101 * pot.D_122 +
-                m_a->M_110 * pot.D_131 + m_a->M_200 * pot.D_221;
-  l_b->F_030 += m_a->M_002 * pot.D_032 + m_a->M_011 * pot.D_041 +
-                m_a->M_020 * pot.D_050 + m_a->M_101 * pot.D_131 +
-                m_a->M_110 * pot.D_140 + m_a->M_200 * pot.D_230;
-  l_b->F_102 += m_a->M_002 * pot.D_104 + m_a->M_011 * pot.D_113 +
-                m_a->M_020 * pot.D_122 + m_a->M_101 * pot.D_203 +
-                m_a->M_110 * pot.D_212 + m_a->M_200 * pot.D_302;
-  l_b->F_111 += m_a->M_002 * pot.D_113 + m_a->M_011 * pot.D_122 +
-                m_a->M_020 * pot.D_131 + m_a->M_101 * pot.D_212 +
-                m_a->M_110 * pot.D_221 + m_a->M_200 * pot.D_311;
-  l_b->F_120 += m_a->M_002 * pot.D_122 + m_a->M_011 * pot.D_131 +
-                m_a->M_020 * pot.D_140 + m_a->M_101 * pot.D_221 +
-                m_a->M_110 * pot.D_230 + m_a->M_200 * pot.D_320;
-  l_b->F_201 += m_a->M_002 * pot.D_203 + m_a->M_011 * pot.D_212 +
-                m_a->M_020 * pot.D_221 + m_a->M_101 * pot.D_302 +
-                m_a->M_110 * pot.D_311 + m_a->M_200 * pot.D_401;
-  l_b->F_210 += m_a->M_002 * pot.D_212 + m_a->M_011 * pot.D_221 +
-                m_a->M_020 * pot.D_230 + m_a->M_101 * pot.D_311 +
-                m_a->M_110 * pot.D_320 + m_a->M_200 * pot.D_410;
-  l_b->F_300 += m_a->M_002 * pot.D_302 + m_a->M_011 * pot.D_311 +
-                m_a->M_020 * pot.D_320 + m_a->M_101 * pot.D_401 +
-                m_a->M_110 * pot.D_410 + m_a->M_200 * pot.D_500;
+  l_b->F_003 += M_002 * D_005 + M_011 * D_014 + M_020 * D_023 + M_101 * D_104 +
+                M_110 * D_113 + M_200 * D_203;
+  l_b->F_012 += M_002 * D_014 + M_011 * D_023 + M_020 * D_032 + M_101 * D_113 +
+                M_110 * D_122 + M_200 * D_212;
+  l_b->F_021 += M_002 * D_023 + M_011 * D_032 + M_020 * D_041 + M_101 * D_122 +
+                M_110 * D_131 + M_200 * D_221;
+  l_b->F_030 += M_002 * D_032 + M_011 * D_041 + M_020 * D_050 + M_101 * D_131 +
+                M_110 * D_140 + M_200 * D_230;
+  l_b->F_102 += M_002 * D_104 + M_011 * D_113 + M_020 * D_122 + M_101 * D_203 +
+                M_110 * D_212 + M_200 * D_302;
+  l_b->F_111 += M_002 * D_113 + M_011 * D_122 + M_020 * D_131 + M_101 * D_212 +
+                M_110 * D_221 + M_200 * D_311;
+  l_b->F_120 += M_002 * D_122 + M_011 * D_131 + M_020 * D_140 + M_101 * D_221 +
+                M_110 * D_230 + M_200 * D_320;
+  l_b->F_201 += M_002 * D_203 + M_011 * D_212 + M_020 * D_221 + M_101 * D_302 +
+                M_110 * D_311 + M_200 * D_401;
+  l_b->F_210 += M_002 * D_212 + M_011 * D_221 + M_020 * D_230 + M_101 * D_311 +
+                M_110 * D_320 + M_200 * D_410;
+  l_b->F_300 += M_002 * D_302 + M_011 * D_311 + M_020 * D_320 + M_101 * D_401 +
+                M_110 * D_410 + M_200 * D_500;
 
   /* Compute 5th order field tensor terms (addition to rank 4) */
-  l_b->F_004 +=
-      m_a->M_001 * pot.D_005 + m_a->M_010 * pot.D_014 + m_a->M_100 * pot.D_104;
-  l_b->F_013 +=
-      m_a->M_001 * pot.D_014 + m_a->M_010 * pot.D_023 + m_a->M_100 * pot.D_113;
-  l_b->F_022 +=
-      m_a->M_001 * pot.D_023 + m_a->M_010 * pot.D_032 + m_a->M_100 * pot.D_122;
-  l_b->F_031 +=
-      m_a->M_001 * pot.D_032 + m_a->M_010 * pot.D_041 + m_a->M_100 * pot.D_131;
-  l_b->F_040 +=
-      m_a->M_001 * pot.D_041 + m_a->M_010 * pot.D_050 + m_a->M_100 * pot.D_140;
-  l_b->F_103 +=
-      m_a->M_001 * pot.D_104 + m_a->M_010 * pot.D_113 + m_a->M_100 * pot.D_203;
-  l_b->F_112 +=
-      m_a->M_001 * pot.D_113 + m_a->M_010 * pot.D_122 + m_a->M_100 * pot.D_212;
-  l_b->F_121 +=
-      m_a->M_001 * pot.D_122 + m_a->M_010 * pot.D_131 + m_a->M_100 * pot.D_221;
-  l_b->F_130 +=
-      m_a->M_001 * pot.D_131 + m_a->M_010 * pot.D_140 + m_a->M_100 * pot.D_230;
-  l_b->F_202 +=
-      m_a->M_001 * pot.D_203 + m_a->M_010 * pot.D_212 + m_a->M_100 * pot.D_302;
-  l_b->F_211 +=
-      m_a->M_001 * pot.D_212 + m_a->M_010 * pot.D_221 + m_a->M_100 * pot.D_311;
-  l_b->F_220 +=
-      m_a->M_001 * pot.D_221 + m_a->M_010 * pot.D_230 + m_a->M_100 * pot.D_320;
-  l_b->F_301 +=
-      m_a->M_001 * pot.D_302 + m_a->M_010 * pot.D_311 + m_a->M_100 * pot.D_401;
-  l_b->F_310 +=
-      m_a->M_001 * pot.D_311 + m_a->M_010 * pot.D_320 + m_a->M_100 * pot.D_410;
-  l_b->F_400 +=
-      m_a->M_001 * pot.D_401 + m_a->M_010 * pot.D_410 + m_a->M_100 * pot.D_500;
+  l_b->F_004 += M_001 * D_005 + M_010 * D_014 + M_100 * D_104;
+  l_b->F_013 += M_001 * D_014 + M_010 * D_023 + M_100 * D_113;
+  l_b->F_022 += M_001 * D_023 + M_010 * D_032 + M_100 * D_122;
+  l_b->F_031 += M_001 * D_032 + M_010 * D_041 + M_100 * D_131;
+  l_b->F_040 += M_001 * D_041 + M_010 * D_050 + M_100 * D_140;
+  l_b->F_103 += M_001 * D_104 + M_010 * D_113 + M_100 * D_203;
+  l_b->F_112 += M_001 * D_113 + M_010 * D_122 + M_100 * D_212;
+  l_b->F_121 += M_001 * D_122 + M_010 * D_131 + M_100 * D_221;
+  l_b->F_130 += M_001 * D_131 + M_010 * D_140 + M_100 * D_230;
+  l_b->F_202 += M_001 * D_203 + M_010 * D_212 + M_100 * D_302;
+  l_b->F_211 += M_001 * D_212 + M_010 * D_221 + M_100 * D_311;
+  l_b->F_220 += M_001 * D_221 + M_010 * D_230 + M_100 * D_320;
+  l_b->F_301 += M_001 * D_302 + M_010 * D_311 + M_100 * D_401;
+  l_b->F_310 += M_001 * D_311 + M_010 * D_320 + M_100 * D_410;
+  l_b->F_400 += M_001 * D_401 + M_010 * D_410 + M_100 * D_500;
 
   /* Compute 5th order field tensor terms (addition to rank 5) */
-  l_b->F_005 += m_a->M_000 * pot.D_005;
-  l_b->F_014 += m_a->M_000 * pot.D_014;
-  l_b->F_023 += m_a->M_000 * pot.D_023;
-  l_b->F_032 += m_a->M_000 * pot.D_032;
-  l_b->F_041 += m_a->M_000 * pot.D_041;
-  l_b->F_050 += m_a->M_000 * pot.D_050;
-  l_b->F_104 += m_a->M_000 * pot.D_104;
-  l_b->F_113 += m_a->M_000 * pot.D_113;
-  l_b->F_122 += m_a->M_000 * pot.D_122;
-  l_b->F_131 += m_a->M_000 * pot.D_131;
-  l_b->F_140 += m_a->M_000 * pot.D_140;
-  l_b->F_203 += m_a->M_000 * pot.D_203;
-  l_b->F_212 += m_a->M_000 * pot.D_212;
-  l_b->F_221 += m_a->M_000 * pot.D_221;
-  l_b->F_230 += m_a->M_000 * pot.D_230;
-  l_b->F_302 += m_a->M_000 * pot.D_302;
-  l_b->F_311 += m_a->M_000 * pot.D_311;
-  l_b->F_320 += m_a->M_000 * pot.D_320;
-  l_b->F_401 += m_a->M_000 * pot.D_401;
-  l_b->F_410 += m_a->M_000 * pot.D_410;
-  l_b->F_500 += m_a->M_000 * pot.D_500;
+  l_b->F_005 += M_000 * D_005;
+  l_b->F_014 += M_000 * D_014;
+  l_b->F_023 += M_000 * D_023;
+  l_b->F_032 += M_000 * D_032;
+  l_b->F_041 += M_000 * D_041;
+  l_b->F_050 += M_000 * D_050;
+  l_b->F_104 += M_000 * D_104;
+  l_b->F_113 += M_000 * D_113;
+  l_b->F_122 += M_000 * D_122;
+  l_b->F_131 += M_000 * D_131;
+  l_b->F_140 += M_000 * D_140;
+  l_b->F_203 += M_000 * D_203;
+  l_b->F_212 += M_000 * D_212;
+  l_b->F_221 += M_000 * D_221;
+  l_b->F_230 += M_000 * D_230;
+  l_b->F_302 += M_000 * D_302;
+  l_b->F_311 += M_000 * D_311;
+  l_b->F_320 += M_000 * D_320;
+  l_b->F_401 += M_000 * D_401;
+  l_b->F_410 += M_000 * D_410;
+  l_b->F_500 += M_000 * D_500;
 
 #endif
 #if SELF_GRAVITY_MULTIPOLE_ORDER > 5
@@ -1935,6 +1946,109 @@ INLINE static void gravity_M2L(struct grav_tensor *l_b,
 #endif
 }
 
+/**
+ * @brief Compute the field tensor due to a multipole.
+ *
+ * @param l_b The field tensor to compute.
+ * @param m_a The multipole.
+ * @param pos_b The position of the field tensor.
+ * @param pos_a The position of the multipole.
+ * @param props The #gravity_props of this calculation.
+ * @param periodic Is the calculation periodic ?
+ * @param dim The size of the simulation box.
+ * @param rs_inv The inverse of the gravity mesh-smoothing scale.
+ */
+INLINE static void gravity_M2L_nonsym(
+    struct grav_tensor *l_b, const struct multipole *m_a, const double pos_b[3],
+    const double pos_a[3], const struct gravity_props *props,
+    const int periodic, const double dim[3], const float rs_inv) {
+
+  /* Recover some constants */
+  const float eps = props->epsilon_cur;
+  const float eps_inv = props->epsilon_cur_inv;
+
+  /* Compute distance vector */
+  float dx = (float)(pos_b[0] - pos_a[0]);
+  float dy = (float)(pos_b[1] - pos_a[1]);
+  float dz = (float)(pos_b[2] - pos_a[2]);
+
+  /* Apply BC */
+  if (periodic) {
+    dx = nearest(dx, dim[0]);
+    dy = nearest(dy, dim[1]);
+    dz = nearest(dz, dim[2]);
+  }
+
+  /* Compute distance */
+  const float r2 = dx * dx + dy * dy + dz * dz;
+  const float r_inv = 1. / sqrtf(r2);
+
+  /* Compute all derivatives */
+  struct potential_derivatives_M2L pot;
+  potential_derivatives_compute_M2L(dx, dy, dz, r2, r_inv, eps, eps_inv,
+                                    periodic, rs_inv, &pot);
+
+  /* Do the M2L tensor multiplication */
+  gravity_M2L_apply(l_b, m_a, &pot);
+}
+
+/**
+ * @brief Compute the field tensor due to a multipole and the symmetric
+ * equivalent.
+ *
+ * @param l_a The first field tensor to compute.
+ * @param l_b The second field tensor to compute.
+ * @param m_a The first multipole.
+ * @param m_b The second multipole.
+ * @param pos_a The position of the first m-pole and field tensor.
+ * @param pos_b The position of the second m-pole and field tensor.
+ * @param props The #gravity_props of this calculation.
+ * @param periodic Is the calculation periodic ?
+ * @param dim The size of the simulation box.
+ * @param rs_inv The inverse of the gravity mesh-smoothing scale.
+ */
+INLINE static void gravity_M2L_symmetric(
+    struct grav_tensor *restrict l_a, struct grav_tensor *restrict l_b,
+    const struct multipole *restrict m_a, const struct multipole *restrict m_b,
+    const double pos_a[3], const double pos_b[3],
+    const struct gravity_props *props, const int periodic, const double dim[3],
+    const float rs_inv) {
+
+  /* Recover some constants */
+  const float eps = props->epsilon_cur;
+  const float eps_inv = props->epsilon_cur_inv;
+
+  /* Compute distance vector */
+  float dx = (float)(pos_b[0] - pos_a[0]);
+  float dy = (float)(pos_b[1] - pos_a[1]);
+  float dz = (float)(pos_b[2] - pos_a[2]);
+
+  /* Apply BC */
+  if (periodic) {
+    dx = nearest(dx, dim[0]);
+    dy = nearest(dy, dim[1]);
+    dz = nearest(dz, dim[2]);
+  }
+
+  /* Compute distance */
+  const float r2 = dx * dx + dy * dy + dz * dz;
+  const float r_inv = 1. / sqrtf(r2);
+
+  /* Compute all derivatives */
+  struct potential_derivatives_M2L pot;
+  potential_derivatives_compute_M2L(dx, dy, dz, r2, r_inv, eps, eps_inv,
+                                    periodic, rs_inv, &pot);
+
+  /* Do the first M2L tensor multiplication */
+  gravity_M2L_apply(l_b, m_a, &pot);
+
+  /* Flip the signs of odd derivatives */
+  potential_derivatives_flip_signs(&pot);
+
+  /* Do the second M2L tensor multiplication */
+  gravity_M2L_apply(l_a, m_b, &pot);
+}
+
 /**
  * @brief Creates a copy of #grav_tensor shifted to a new location.
  *
@@ -1945,8 +2059,8 @@ INLINE static void gravity_M2L(struct grav_tensor *l_b,
  * @param pos_a The position to which m_b will be shifted.
  * @param pos_b The current postion of the multipole to shift.
  */
-INLINE static void gravity_L2L(struct grav_tensor *la,
-                               const struct grav_tensor *lb,
+INLINE static void gravity_L2L(struct grav_tensor *restrict la,
+                               const struct grav_tensor *restrict lb,
                                const double pos_a[3], const double pos_b[3]) {
 
   /* Initialise everything to zero */
diff --git a/src/outputlist.c b/src/outputlist.c
index fd33370ca45f25c17ecd2cc8df622138842507f3..cab4013bc3841698183b825c7985a75e7095b29c 100644
--- a/src/outputlist.c
+++ b/src/outputlist.c
@@ -112,6 +112,9 @@ void output_list_read_file(struct output_list *outputlist, const char *filename,
     ind += 1;
   }
 
+  /* Cleanup */
+  free(line);
+
   if (ind != outputlist->size)
     error("Did not read the correct number of output times.");
 
@@ -232,7 +235,7 @@ void output_list_init(struct output_list **list, const struct engine *e,
   sprintf(param_name, "%s:output_list", name);
   parser_get_param_string(params, param_name, filename);
 
-  message("Reading %s output file.", name);
+  if (e->verbose) message("Reading %s output file.", name);
   output_list_read_file(*list, filename, cosmo);
 
   if ((*list)->size < 2)
@@ -266,8 +269,12 @@ void output_list_print(const struct output_list *outputlist) {
 /**
  * @brief Clean an #output_list
  */
-void output_list_clean(struct output_list *outputlist) {
-  free(outputlist->times);
+void output_list_clean(struct output_list **outputlist) {
+  if (*outputlist) {
+    free((*outputlist)->times);
+    free(*outputlist);
+    *outputlist = NULL;
+  }
 }
 
 /**
diff --git a/src/outputlist.h b/src/outputlist.h
index 6045d75ea29f0aab44252835147502f3df0de20c..b7b12ca32f469c70f716553b30a15f48198f8e5e 100644
--- a/src/outputlist.h
+++ b/src/outputlist.h
@@ -58,7 +58,7 @@ void output_list_read_next_time(struct output_list *t, const struct engine *e,
 void output_list_init(struct output_list **list, const struct engine *e,
                       const char *name, double *delta_time, double *time_first);
 void output_list_print(const struct output_list *outputlist);
-void output_list_clean(struct output_list *outputlist);
+void output_list_clean(struct output_list **outputlist);
 void output_list_struct_dump(struct output_list *list, FILE *stream);
 void output_list_struct_restore(struct output_list *list, FILE *stream);
 
diff --git a/src/parallel_io.c b/src/parallel_io.c
index 9d0936bcd1d4ec31c7b6bf5eed139dc6c62a2e26..caf813402d9e50ac4d165d8b731c2c4145e097a8 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -42,6 +42,7 @@
 #include "cooling_io.h"
 #include "dimension.h"
 #include "engine.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "gravity_io.h"
 #include "gravity_properties.h"
@@ -52,8 +53,11 @@
 #include "memuse.h"
 #include "part.h"
 #include "part_type.h"
+#include "star_formation_io.h"
 #include "stars_io.h"
+#include "tracers_io.h"
 #include "units.h"
+#include "velociraptor_io.h"
 #include "xmf.h"
 
 /* The current limit of ROMIO (the underlying MPI-IO layer) is 2GB */
@@ -139,7 +143,37 @@ void readArray_chunk(hid_t h_data, hid_t h_plist_id,
       for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
     } else {
       float* temp_f = (float*)temp;
-      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+
+#ifdef SWIFT_DEBUG_CHECKS
+      float maximum = 0.;
+      float minimum = FLT_MAX;
+#endif
+
+      /* Loop that converts the Units */
+      for (size_t i = 0; i < num_elements; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Find the absolute minimum and maximum values */
+        const float abstemp_f = fabsf(temp_f[i]);
+        if (abstemp_f != 0.f) {
+          maximum = max(maximum, abstemp_f);
+          minimum = min(minimum, abstemp_f);
+        }
+#endif
+
+        /* Convert the float units */
+        temp_f[i] *= factor;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* The two possible errors: larger than float or smaller
+       * than float precision. */
+      if (factor * maximum > FLT_MAX) {
+        error("Unit conversion results in numbers larger than floats");
+      } else if (factor * minimum < FLT_MIN) {
+        error("Numbers smaller than float precision");
+      }
+#endif
     }
   }
 
@@ -304,6 +338,8 @@ void readArray(hid_t grp, struct io_props props, size_t N, long long N_total,
       N -= max_chunk_size;
       props.field += max_chunk_size * props.partSize; /* char* on the field */
       props.parts += max_chunk_size;                  /* part* on the part */
+      props.xparts += max_chunk_size;                 /* xpart* on the xpart */
+      props.gparts += max_chunk_size;                 /* gpart* on the gpart */
       offset += max_chunk_size;
       redo = 1;
     } else {
@@ -353,13 +389,13 @@ void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
     rank = 2;
     shape[0] = N_total;
     shape[1] = props.dimension;
-    chunk_shape[0] = 1 << 16; /* Just a guess...*/
+    chunk_shape[0] = 1 << 20; /* Just a guess...*/
     chunk_shape[1] = props.dimension;
   } else {
     rank = 1;
     shape[0] = N_total;
     shape[1] = 0;
-    chunk_shape[0] = 1 << 16; /* Just a guess...*/
+    chunk_shape[0] = 1 << 20; /* Just a guess...*/
     chunk_shape[1] = 0;
   }
 
@@ -400,8 +436,9 @@ void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
   io_write_attribute_s(h_data, "Conversion factor", buffer);
 
   /* Add a line to the XMF */
-  xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
-                 props.dimension, props.type);
+  if (xmfFile != NULL)
+    xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
+                   props.dimension, props.type);
 
   /* Close everything */
   H5Pclose(h_plist_id);
@@ -576,6 +613,8 @@ void writeArray(struct engine* e, hid_t grp, char* fileName,
       N -= max_chunk_size;
       props.field += max_chunk_size * props.partSize; /* char* on the field */
       props.parts += max_chunk_size;                  /* part* on the part */
+      props.xparts += max_chunk_size;                 /* xpart* on the xpart */
+      props.gparts += max_chunk_size;                 /* gpart* on the gpart */
       offset += max_chunk_size;
       redo = 1;
     } else {
@@ -615,7 +654,6 @@ void writeArray(struct engine* e, hid_t grp, char* fileName,
  * @param Ngas (output) The number of particles read from the file.
  * @param Ngparts (output) The number of particles read from the file.
  * @param Nstars (output) The number of particles read from the file.
- * @param periodic (output) 1 if the volume is periodic, 0 if not.
  * @param flag_entropy (output) 1 if the ICs contained Entropy in the
  * InternalEnergy field
  * @param with_hydro Are we running with hydro ?
@@ -637,11 +675,11 @@ void writeArray(struct engine* e, hid_t grp, char* fileName,
 void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
                       double dim[3], struct part** parts, struct gpart** gparts,
                       struct spart** sparts, size_t* Ngas, size_t* Ngparts,
-                      size_t* Nstars, int* periodic, int* flag_entropy,
-                      int with_hydro, int with_gravity, int with_stars,
-                      int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                      int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info,
-                      int n_threads, int dry_run) {
+                      size_t* Nstars, int* flag_entropy, int with_hydro,
+                      int with_gravity, int with_stars, int cleanup_h,
+                      int cleanup_sqrt_a, double h, double a, int mpi_rank,
+                      int mpi_size, MPI_Comm comm, MPI_Info info, int n_threads,
+                      int dry_run) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -661,17 +699,6 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
   h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
   if (h_file < 0) error("Error while opening file '%s'.", fileName);
 
-  /* Open header to read simulation properties */
-  /* message("Reading runtime parameters..."); */
-  h_grp = H5Gopen(h_file, "/RuntimePars", H5P_DEFAULT);
-  if (h_grp < 0) error("Error while opening runtime parameters\n");
-
-  /* Read the relevant information */
-  io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
-
-  /* Close runtime parameters */
-  H5Gclose(h_grp);
-
   /* Open header to read simulation properties */
   /* message("Reading file header..."); */
   h_grp = H5Gopen(h_file, "/Header", H5P_DEFAULT);
@@ -686,6 +713,21 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
     error("ICs dimensionality (%dD) does not match code dimensionality (%dD)",
           dimension, (int)hydro_dimension);
 
+  /* Check whether the number of files is specified (if the info exists) */
+  const hid_t hid_files = H5Aexists(h_grp, "NumFilesPerSnapshot");
+  int num_files = 1;
+  if (hid_files < 0)
+    error(
+        "Error while testing the existence of 'NumFilesPerSnapshot' attribute");
+  if (hid_files > 0)
+    io_read_attribute(h_grp, "NumFilesPerSnapshot", INT, &num_files);
+  if (num_files != 1)
+    error(
+        "ICs are split over multiple files (%d). SWIFT cannot handle this "
+        "case. The script /tools/combine_ics.py is available in the repository "
+        "to combine files into a valid input file.",
+        num_files);
+
   /* Read the relevant information and print status */
   int flag_entropy_temp[6];
   io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp);
@@ -781,12 +823,12 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
     memuse_report("parts", (*Ngas) * sizeof(struct part));
   }
 
-  /* Allocate memory to store star particles */
+  /* Allocate memory to store stars particles */
   if (with_stars) {
-    *Nstars = N[swift_type_star];
+    *Nstars = N[swift_type_stars];
     if (posix_memalign((void**)sparts, spart_align,
                        *Nstars * sizeof(struct spart)) != 0)
-      error("Error while allocating memory for star particles");
+      error("Error while allocating memory for stars particles");
     bzero(*sparts, *Nstars * sizeof(struct spart));
     memuse_report("sparts", (*Nstars) * sizeof(struct spart));
   }
@@ -796,7 +838,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
     Ndm = N[1];
     *Ngparts = (with_hydro ? N[swift_type_gas] : 0) +
                N[swift_type_dark_matter] +
-               (with_stars ? N[swift_type_star] : 0);
+               (with_stars ? N[swift_type_stars] : 0);
     if (posix_memalign((void**)gparts, gpart_align,
                        *Ngparts * sizeof(struct gpart)) != 0)
       error("Error while allocating memory for gravity particles");
@@ -846,10 +888,10 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
         }
         break;
 
-      case swift_type_star:
+      case swift_type_stars:
         if (with_stars) {
           Nparticles = *Nstars;
-          star_read_particles(*sparts, list, &num_fields);
+          stars_read_particles(*sparts, list, &num_fields);
         }
         break;
 
@@ -882,9 +924,9 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
     /* Duplicate the hydro particles into gparts */
     if (with_hydro) io_duplicate_hydro_gparts(&tp, *parts, *gparts, *Ngas, Ndm);
 
-    /* Duplicate the star particles into gparts */
+    /* Duplicate the stars particles into gparts */
     if (with_stars)
-      io_duplicate_star_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
+      io_duplicate_stars_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
 
     threadpool_clean(&tp);
   }
@@ -919,8 +961,17 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
   const struct gpart* gparts = e->s->gparts;
   const struct spart* sparts = e->s->sparts;
   struct swift_params* params = e->parameter_file;
+  const int with_cosmology = e->policy & engine_policy_cosmology;
+  const int with_cooling = e->policy & engine_policy_cooling;
+  const int with_temperature = e->policy & engine_policy_temperature;
+#ifdef HAVE_VELOCIRAPTOR
+  const int with_stf = (e->policy & engine_policy_structure_finding) &&
+                       (e->s->gpart_group_data != NULL);
+#else
+  const int with_stf = 0;
+#endif
+
   FILE* xmfFile = 0;
-  int periodic = e->s->periodic;
   int numFiles = 1;
 
   /* First time, we need to create the XMF file */
@@ -931,13 +982,12 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
 
   /* HDF5 File name */
   char fileName[FILENAME_BUFFER_SIZE];
-  if (e->snapshot_label_delta == 1)
-    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-             e->snapshot_output_count + e->snapshot_label_first);
-  else
+  if (e->snapshot_int_time_label_on)
     snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%06i.hdf5", baseName,
-             e->snapshot_output_count * e->snapshot_label_delta +
-                 e->snapshot_label_first);
+             (int)round(e->time));
+  else
+    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+             e->snapshot_output_count);
 
   /* Open HDF5 file with the chosen parameters */
   hid_t h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -947,28 +997,26 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
    * specific output */
   xmf_write_outputheader(xmfFile, fileName, e->time);
 
-  /* Open header to write simulation properties */
-  /* message("Writing runtime parameters..."); */
-  hid_t h_grp =
-      H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating runtime parameters group\n");
-
-  /* Write the relevant information */
-  io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);
-
-  /* Close runtime parameters */
-  H5Gclose(h_grp);
-
   /* Open header to write simulation properties */
   /* message("Writing file header..."); */
-  h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  hid_t h_grp =
+      H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating file header\n");
 
+  /* Convert basic output information to snapshot units */
+  const double factor_time =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_TIME);
+  const double factor_length =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_LENGTH);
+  const double dblTime = e->time * factor_time;
+  const double dim[3] = {e->s->dim[0] * factor_length,
+                         e->s->dim[1] * factor_length,
+                         e->s->dim[2] * factor_length};
+
   /* Print the relevant information and print status */
-  io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
-  double dblTime = e->time;
+  io_write_attribute(h_grp, "BoxSize", DOUBLE, dim, 3);
   io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
-  int dimension = (int)hydro_dimension;
+  const int dimension = (int)hydro_dimension;
   io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
   io_write_attribute(h_grp, "Redshift", DOUBLE, &e->cosmology->z, 1);
   io_write_attribute(h_grp, "Scale-factor", DOUBLE, &e->cosmology->a, 1);
@@ -1021,8 +1069,10 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
   h_grp = H5Gcreate(h_file, "/SubgridScheme", H5P_DEFAULT, H5P_DEFAULT,
                     H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating subgrid group");
-  cooling_write_flavour(h_grp);
+  entropy_floor_write_flavour(h_grp);
+  cooling_write_flavour(h_grp, e->cooling_func);
   chemistry_write_flavour(h_grp);
+  tracers_write_flavour(h_grp);
   H5Gclose(h_grp);
 
   /* Print the gravity parameters */
@@ -1034,7 +1084,16 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
     H5Gclose(h_grp);
   }
 
-  /* Print the gravity parameters */
+  /* Print the stellar parameters */
+  if (e->policy & engine_policy_stars) {
+    h_grp = H5Gcreate(h_file, "/StarsScheme", H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0) error("Error while creating stars group");
+    stars_props_print_snapshot(h_grp, e->stars_properties);
+    H5Gclose(h_grp);
+  }
+
+  /* Print the cosmological parameters */
   h_grp =
       H5Gcreate(h_file, "/Cosmology", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating cosmology group");
@@ -1094,14 +1153,36 @@ void prepare_file(struct engine* e, const char* baseName, long long N_total[6],
       case swift_type_gas:
         hydro_write_particles(parts, xparts, list, &num_fields);
         num_fields += chemistry_write_particles(parts, list + num_fields);
+        if (with_cooling || with_temperature) {
+          num_fields += cooling_write_particles(
+              parts, xparts, list + num_fields, e->cooling_func);
+        }
+        num_fields += tracers_write_particles(parts, xparts, list + num_fields,
+                                              with_cosmology);
+        num_fields +=
+            star_formation_write_particles(parts, xparts, list + num_fields);
+        if (with_stf) {
+          num_fields +=
+              velociraptor_write_parts(parts, xparts, list + num_fields);
+        }
         break;
 
       case swift_type_dark_matter:
         darkmatter_write_particles(gparts, list, &num_fields);
+        if (with_stf) {
+          num_fields += velociraptor_write_gparts(e->s->gpart_group_data,
+                                                  list + num_fields);
+        }
         break;
 
-      case swift_type_star:
-        star_write_particles(sparts, list, &num_fields);
+      case swift_type_stars:
+        stars_write_particles(sparts, list, &num_fields);
+        num_fields += chemistry_write_sparticles(sparts, list + num_fields);
+        num_fields +=
+            tracers_write_sparticles(sparts, list + num_fields, with_cosmology);
+        if (with_stf) {
+          num_fields += velociraptor_write_sparts(sparts, list + num_fields);
+        }
         break;
 
       default:
@@ -1163,22 +1244,42 @@ void write_output_parallel(struct engine* e, const char* baseName,
                            int mpi_rank, int mpi_size, MPI_Comm comm,
                            MPI_Info info) {
 
-  const size_t Ngas = e->s->nr_parts;
-  const size_t Nstars = e->s->nr_sparts;
-  const size_t Ntot = e->s->nr_gparts;
   const struct part* parts = e->s->parts;
   const struct xpart* xparts = e->s->xparts;
   const struct gpart* gparts = e->s->gparts;
-  struct gpart* dmparts = NULL;
   const struct spart* sparts = e->s->sparts;
-  const struct cooling_function_data* cooling = e->cooling_func;
   struct swift_params* params = e->parameter_file;
+  const int with_cosmology = e->policy & engine_policy_cosmology;
+  const int with_cooling = e->policy & engine_policy_cooling;
+  const int with_temperature = e->policy & engine_policy_temperature;
+#ifdef HAVE_VELOCIRAPTOR
+  const int with_stf = (e->policy & engine_policy_structure_finding) &&
+                       (e->s->gpart_group_data != NULL);
+#else
+  const int with_stf = 0;
+#endif
 
-  /* Number of unassociated gparts */
-  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
+  /* Number of particles currently in the arrays */
+  const size_t Ntot = e->s->nr_gparts;
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  // const size_t Nbaryons = Ngas + Nstars;
+  // const size_t Ndm = Ntot > 0 ? Ntot - Nbaryons : 0;
+
+  /* Number of particles that we will write */
+  const size_t Ntot_written =
+      e->s->nr_gparts - e->s->nr_inhibited_gparts - e->s->nr_extra_gparts;
+  const size_t Ngas_written =
+      e->s->nr_parts - e->s->nr_inhibited_parts - e->s->nr_extra_parts;
+  const size_t Nstars_written =
+      e->s->nr_sparts - e->s->nr_inhibited_sparts - e->s->nr_extra_sparts;
+  const size_t Nbaryons_written = Ngas_written + Nstars_written;
+  const size_t Ndm_written =
+      Ntot_written > 0 ? Ntot_written - Nbaryons_written : 0;
 
   /* Compute offset in the file and total number of particles */
-  size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
+  size_t N[swift_type_count] = {
+      Ngas_written, Ndm_written, 0, 0, Nstars_written, 0};
   long long N_total[swift_type_count] = {0};
   long long offset[swift_type_count] = {0};
   MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
@@ -1212,13 +1313,38 @@ void write_output_parallel(struct engine* e, const char* baseName,
 
   /* HDF5 File name */
   char fileName[FILENAME_BUFFER_SIZE];
-  if (e->snapshot_label_delta == 1)
-    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-             e->snapshot_output_count + e->snapshot_label_first);
-  else
+  if (e->snapshot_int_time_label_on)
     snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%06i.hdf5", baseName,
-             e->snapshot_output_count * e->snapshot_label_delta +
-                 e->snapshot_label_first);
+             (int)round(e->time));
+  else
+    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+             e->snapshot_output_count);
+
+  /* Now write the top-level cell structure */
+  hid_t h_file_cells = 0, h_grp_cells = 0;
+  if (mpi_rank == 0) {
+
+    /* Open the snapshot on rank 0 */
+    h_file_cells = H5Fopen(fileName, H5F_ACC_RDWR, H5P_DEFAULT);
+    if (h_file_cells < 0)
+      error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);
+
+    /* Create the group we want in the file */
+    h_grp_cells = H5Gcreate(h_file_cells, "/Cells", H5P_DEFAULT, H5P_DEFAULT,
+                            H5P_DEFAULT);
+    if (h_grp_cells < 0) error("Error while creating cells group");
+  }
+
+  /* Write the location of the particles in the arrays */
+  io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->cells_top,
+                        e->s->nr_cells, e->s->width, mpi_rank, N_total, offset,
+                        internal_units, snapshot_units);
+
+  /* Close everything */
+  if (mpi_rank == 0) {
+    H5Gclose(h_grp_cells);
+    H5Fclose(h_file_cells);
+  }
 
   /* Prepare some file-access properties */
   hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1253,8 +1379,8 @@ void write_output_parallel(struct engine* e, const char* baseName,
 #if H5_VERSION_GE(1, 10, 0)
   h_err = H5Pset_all_coll_metadata_ops(plist_id, 1);
   if (h_err < 0) error("Error setting collective meta-data on all ops");
-  h_err = H5Pset_coll_metadata_write(plist_id, 1);
-  if (h_err < 0) error("Error setting collective meta-data writes");
+    // h_err = H5Pset_coll_metadata_write(plist_id, 1);
+    // if (h_err < 0) error("Error setting collective meta-data writes");
 #endif
 
 #ifdef IO_SPEED_MEASUREMENT
@@ -1330,38 +1456,155 @@ void write_output_parallel(struct engine* e, const char* baseName,
     struct io_props list[100];
     size_t Nparticles = 0;
 
+    struct part* parts_written = NULL;
+    struct xpart* xparts_written = NULL;
+    struct gpart* gparts_written = NULL;
+    struct velociraptor_gpart_data* gpart_group_data_written = NULL;
+    struct spart* sparts_written = NULL;
+
     /* Write particle fields from the particle structure */
     switch (ptype) {
 
-      case swift_type_gas:
-        Nparticles = Ngas;
-        hydro_write_particles(parts, xparts, list, &num_fields);
-        num_fields += chemistry_write_particles(parts, list + num_fields);
-        num_fields +=
-            cooling_write_particles(xparts, list + num_fields, cooling);
-        break;
-
-      case swift_type_dark_matter:
-        /* Allocate temporary array */
-        if (posix_memalign((void**)&dmparts, gpart_align,
-                           Ndm * sizeof(struct gpart)) != 0)
-          error(
-              "Error while allocating temporary memory for "
-              "DM particles");
-        bzero(dmparts, Ndm * sizeof(struct gpart));
-
-        /* Collect the DM particles from gpart */
-        io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
-
-        /* Write DM particles */
-        Nparticles = Ndm;
-        darkmatter_write_particles(dmparts, list, &num_fields);
-        break;
-
-      case swift_type_star:
-        Nparticles = Nstars;
-        star_write_particles(sparts, list, &num_fields);
-        break;
+      case swift_type_gas: {
+        if (Ngas == Ngas_written) {
+
+          /* No inhibited particles: easy case */
+          Nparticles = Ngas;
+          hydro_write_particles(parts, xparts, list, &num_fields);
+          num_fields += chemistry_write_particles(parts, list + num_fields);
+          if (with_cooling || with_temperature) {
+            num_fields += cooling_write_particles(
+                parts, xparts, list + num_fields, e->cooling_func);
+          }
+          if (with_stf) {
+            num_fields +=
+                velociraptor_write_parts(parts, xparts, list + num_fields);
+          }
+          num_fields += tracers_write_particles(
+              parts, xparts, list + num_fields, with_cosmology);
+          num_fields +=
+              star_formation_write_particles(parts, xparts, list + num_fields);
+
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          Nparticles = Ngas_written;
+
+          /* Allocate temporary arrays */
+          if (posix_memalign((void**)&parts_written, part_align,
+                             Ngas_written * sizeof(struct part)) != 0)
+            error("Error while allocating temporart memory for parts");
+          if (posix_memalign((void**)&xparts_written, xpart_align,
+                             Ngas_written * sizeof(struct xpart)) != 0)
+            error("Error while allocating temporart memory for xparts");
+
+          /* Collect the particles we want to write */
+          io_collect_parts_to_write(parts, xparts, parts_written,
+                                    xparts_written, Ngas, Ngas_written);
+
+          /* Select the fields to write */
+          hydro_write_particles(parts_written, xparts_written, list,
+                                &num_fields);
+          num_fields +=
+              chemistry_write_particles(parts_written, list + num_fields);
+          if (with_cooling || with_temperature) {
+            num_fields +=
+                cooling_write_particles(parts_written, xparts_written,
+                                        list + num_fields, e->cooling_func);
+          }
+          if (with_stf) {
+            num_fields += velociraptor_write_parts(
+                parts_written, xparts_written, list + num_fields);
+          }
+          num_fields += tracers_write_particles(
+              parts_written, xparts_written, list + num_fields, with_cosmology);
+          num_fields += star_formation_write_particles(
+              parts_written, xparts_written, list + num_fields);
+        }
+      } break;
+
+      case swift_type_dark_matter: {
+        if (Ntot == Ndm_written) {
+
+          /* This is a DM-only run without inhibited particles */
+          Nparticles = Ntot;
+          darkmatter_write_particles(gparts, list, &num_fields);
+          if (with_stf) {
+            num_fields += velociraptor_write_gparts(e->s->gpart_group_data,
+                                                    list + num_fields);
+          }
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          Nparticles = Ndm_written;
+
+          /* Allocate temporary array */
+          if (posix_memalign((void**)&gparts_written, gpart_align,
+                             Ndm_written * sizeof(struct gpart)) != 0)
+            error("Error while allocating temporart memory for gparts");
+
+          if (with_stf) {
+            if (posix_memalign(
+                    (void**)&gpart_group_data_written, gpart_align,
+                    Ndm_written * sizeof(struct velociraptor_gpart_data)) != 0)
+              error(
+                  "Error while allocating temporart memory for gparts STF "
+                  "data");
+          }
+
+          /* Collect the non-inhibited DM particles from gpart */
+          io_collect_gparts_to_write(gparts, e->s->gpart_group_data,
+                                     gparts_written, gpart_group_data_written,
+                                     Ntot, Ndm_written, with_stf);
+
+          /* Select the fields to write */
+          darkmatter_write_particles(gparts_written, list, &num_fields);
+          if (with_stf) {
+#ifdef HAVE_VELOCIRAPTOR
+            num_fields += velociraptor_write_gparts(gpart_group_data_written,
+                                                    list + num_fields);
+#endif
+          }
+        }
+      } break;
+
+      case swift_type_stars: {
+        if (Nstars == Nstars_written) {
+
+          /* No inhibited particles: easy case */
+          Nparticles = Nstars;
+          stars_write_particles(sparts, list, &num_fields);
+          num_fields += chemistry_write_sparticles(sparts, list + num_fields);
+          num_fields += tracers_write_sparticles(sparts, list + num_fields,
+                                                 with_cosmology);
+          if (with_stf) {
+            num_fields += velociraptor_write_sparts(sparts, list + num_fields);
+          }
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          Nparticles = Nstars_written;
+
+          /* Allocate temporary arrays */
+          if (posix_memalign((void**)&sparts_written, spart_align,
+                             Nstars_written * sizeof(struct spart)) != 0)
+            error("Error while allocating temporart memory for sparts");
+
+          /* Collect the particles we want to write */
+          io_collect_sparts_to_write(sparts, sparts_written, Nstars,
+                                     Nstars_written);
+
+          /* Select the fields to write */
+          stars_write_particles(sparts_written, list, &num_fields);
+          num_fields += chemistry_write_sparticles(sparts, list + num_fields);
+          num_fields += tracers_write_sparticles(sparts, list + num_fields,
+                                                 with_cosmology);
+          if (with_stf) {
+            num_fields +=
+                velociraptor_write_sparts(sparts_written, list + num_fields);
+          }
+        }
+      } break;
 
       default:
         error("Particle Type %d not yet supported. Aborting", ptype);
@@ -1383,10 +1626,11 @@ void write_output_parallel(struct engine* e, const char* baseName,
     }
 
     /* Free temporary array */
-    if (dmparts) {
-      free(dmparts);
-      dmparts = 0;
-    }
+    if (parts_written) free(parts_written);
+    if (xparts_written) free(xparts_written);
+    if (gparts_written) free(gparts_written);
+    if (gpart_group_data_written) free(gpart_group_data_written);
+    if (sparts_written) free(sparts_written);
 
 #ifdef IO_SPEED_MEASUREMENT
     MPI_Barrier(MPI_COMM_WORLD);
diff --git a/src/parallel_io.h b/src/parallel_io.h
index 668b6f83443fe4c39ddf3269c8d2236e72588e32..9cd775347f0d5fbb3bc1b17664e0d5dba734d795 100644
--- a/src/parallel_io.h
+++ b/src/parallel_io.h
@@ -25,22 +25,21 @@
 #if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
 
 /* MPI headers. */
-#ifdef WITH_MPI
 #include <mpi.h>
-#endif
 
 /* Includes. */
 #include "engine.h"
+#include "io_properties.h"
 #include "part.h"
 #include "units.h"
 
 void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
                       double dim[3], struct part** parts, struct gpart** gparts,
                       struct spart** sparts, size_t* Ngas, size_t* Ngparts,
-                      size_t* Nsparts, int* periodic, int* flag_entropy,
-                      int with_hydro, int with_gravity, int with_stars,
-                      int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                      int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info,
+                      size_t* Nsparts, int* flag_entropy, int with_hydro,
+                      int with_gravity, int with_stars, int cleanup_h,
+                      int cleanup_sqrt_a, double h, double a, int mpi_rank,
+                      int mpi_size, MPI_Comm comm, MPI_Info info,
                       int nr_threads, int dry_run);
 
 void write_output_parallel(struct engine* e, const char* baseName,
@@ -48,6 +47,13 @@ void write_output_parallel(struct engine* e, const char* baseName,
                            const struct unit_system* snapshot_units,
                            int mpi_rank, int mpi_size, MPI_Comm comm,
                            MPI_Info info);
+
+void writeArray(struct engine* e, hid_t grp, char* fileName,
+                char* partTypeGroupName, struct io_props props, size_t N,
+                long long N_total, int mpi_rank, long long offset,
+                const struct unit_system* internal_units,
+                const struct unit_system* snapshot_units);
+
 #endif
 
 #endif /* SWIFT_PARALLEL_IO_H */
diff --git a/src/parser.c b/src/parser.c
index d804be507e81ca265b31a6a2699d4f0b998f7c3b..57592d57abb78100d113b91710af68f7b1c3e32d 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -35,6 +35,7 @@
 #include "error.h"
 #include "restart.h"
 #include "tools.h"
+#include "version.h"
 
 #define PARSER_COMMENT_STRING "#"
 #define PARSER_COMMENT_CHAR '#'
@@ -363,7 +364,12 @@ static void parse_line(char *line, struct swift_params *params) {
 
       /* Check if the line contains a value and parse it. */
       if (strchr(trim_line, PARSER_VALUE_CHAR)) {
-        parse_value(trim_line, params);
+
+        /* Trim trailing space before parsing line for a value. */
+        char no_space_line[PARSER_MAX_LINE_SIZE];
+        strcpy(no_space_line, trim_trailing(trim_line));
+
+        parse_value(no_space_line, params);
       }
       /* Check for invalid lines,not including the start and end of file. */
       else if (strcmp(trim_line, PARSER_START_OF_FILE) &&
@@ -1153,7 +1159,13 @@ void parser_write_params_to_file(const struct swift_params *params,
   char *token;
 
   /* Start of file identifier in YAML. */
-  fprintf(file, "%s\n", PARSER_START_OF_FILE);
+  fprintf(file, "%s\n\n", PARSER_START_OF_FILE);
+
+  fprintf(file, "# SWIFT used parameter file\n");
+  fprintf(file, "# Code version: %s\n", package_version());
+  fprintf(file, "# git revision: %s\n", git_revision());
+  fprintf(file, "# git branch: %s\n", git_branch());
+  fprintf(file, "# git date: %s\n", git_date());
 
   /* Flags to track which parameters are written. */
   int *written = (int *)calloc(params->paramCount, sizeof(int));
diff --git a/src/part.c b/src/part.c
index 050e10e9cdd0ab56adcd34ba3e6f2d35c274f14a..ec3627d728f69f469cc7d75eb2beb9ae39ed107e 100644
--- a/src/part.c
+++ b/src/part.c
@@ -26,8 +26,10 @@
 #endif
 
 /* This object's header. */
-#include "error.h"
 #include "multipole.h"
+
+/* Local headers */
+#include "error.h"
 #include "part.h"
 
 /**
@@ -88,7 +90,7 @@ void part_relink_parts_to_gparts(struct gpart *gparts, size_t N,
 void part_relink_sparts_to_gparts(struct gpart *gparts, size_t N,
                                   struct spart *sparts) {
   for (size_t k = 0; k < N; k++) {
-    if (gparts[k].type == swift_type_star) {
+    if (gparts[k].type == swift_type_stars) {
       sparts[-gparts[k].id_or_neg_offset].gpart = &gparts[k];
     }
   }
@@ -108,7 +110,7 @@ void part_relink_all_parts_to_gparts(struct gpart *gparts, size_t N,
   for (size_t k = 0; k < N; k++) {
     if (gparts[k].type == swift_type_gas) {
       parts[-gparts[k].id_or_neg_offset].gpart = &gparts[k];
-    } else if (gparts[k].type == swift_type_star) {
+    } else if (gparts[k].type == swift_type_stars) {
       sparts[-gparts[k].id_or_neg_offset].gpart = &gparts[k];
     }
   }
@@ -133,10 +135,13 @@ void part_verify_links(struct part *parts, struct gpart *gparts,
                        struct spart *sparts, size_t nr_parts, size_t nr_gparts,
                        size_t nr_sparts, int verbose) {
 
+  ticks tic = getticks();
+
   for (size_t k = 0; k < nr_gparts; ++k) {
 
-    /* We have a DM particle */
-    if (gparts[k].type == swift_type_dark_matter) {
+    /* We have a real DM particle */
+    if (gparts[k].type == swift_type_dark_matter &&
+        gparts[k].time_bin != time_bin_not_created) {
 
       /* Check that it's not linked */
       if (gparts[k].id_or_neg_offset <= 0)
@@ -171,11 +176,11 @@ void part_verify_links(struct part *parts, struct gpart *gparts,
         error("Linked particles are not at the same time !");
     }
 
-    else if (gparts[k].type == swift_type_star) {
+    else if (gparts[k].type == swift_type_stars) {
 
       /* Check that it is linked */
       if (gparts[k].id_or_neg_offset > 0)
-        error("Star gpart not linked to anything !");
+        error("Stars gpart not linked to anything !");
 
       /* Find its link */
       const struct spart *spart = &sparts[-gparts[k].id_or_neg_offset];
@@ -246,6 +251,9 @@ void part_verify_links(struct part *parts, struct gpart *gparts,
   }
 
   if (verbose) message("All links OK");
+  if (verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 #ifdef WITH_MPI
@@ -254,7 +262,6 @@ MPI_Datatype part_mpi_type;
 MPI_Datatype xpart_mpi_type;
 MPI_Datatype gpart_mpi_type;
 MPI_Datatype spart_mpi_type;
-MPI_Datatype multipole_mpi_type;
 
 /**
  * @brief Registers MPI particle types.
@@ -287,11 +294,5 @@ void part_create_mpi_types(void) {
       MPI_Type_commit(&spart_mpi_type) != MPI_SUCCESS) {
     error("Failed to create MPI type for sparts.");
   }
-  if (MPI_Type_contiguous(
-          sizeof(struct gravity_tensors) / sizeof(unsigned char), MPI_BYTE,
-          &multipole_mpi_type) != MPI_SUCCESS ||
-      MPI_Type_commit(&multipole_mpi_type) != MPI_SUCCESS) {
-    error("Failed to create MPI type for multipole.");
-  }
 }
 #endif
diff --git a/src/part.h b/src/part.h
index 145bf2111771d8ad254affb213b93b7ac829f1a6..069a5075c1a8aa1037f37df2f1ce0168e1130a5f 100644
--- a/src/part.h
+++ b/src/part.h
@@ -54,6 +54,9 @@
 #elif defined(HOPKINS_PU_SPH)
 #include "./hydro/PressureEnergy/hydro_part.h"
 #define hydro_need_extra_init_loop 0
+#elif defined(HOPKINS_PU_SPH_MONAGHAN)
+#include "./hydro/PressureEnergyMorrisMonaghanAV/hydro_part.h"
+#define hydro_need_extra_init_loop 0
 #elif defined(DEFAULT_SPH)
 #include "./hydro/Default/hydro_part.h"
 #define hydro_need_extra_init_loop 0
@@ -72,6 +75,10 @@
 #elif defined(PLANETARY_SPH)
 #include "./hydro/Planetary/hydro_part.h"
 #define hydro_need_extra_init_loop 0
+#elif defined(ANARCHY_PU_SPH)
+#include "./hydro/AnarchyPU/hydro_part.h"
+#define hydro_need_extra_init_loop 0
+#define EXTRA_HYDRO_LOOP
 #else
 #error "Invalid choice of SPH variant"
 #endif
@@ -86,7 +93,15 @@
 #endif
 
 /* Import the right star particle definition */
-#include "./stars/Default/star_part.h"
+#if defined(STARS_NONE)
+#include "./stars/Default/stars_part.h"
+#elif defined(STARS_EAGLE)
+#include "./stars/EAGLE/stars_part.h"
+#elif defined(STARS_GEAR)
+#include "./stars/GEAR/stars_part.h"
+#else
+#error "Invalid choice of star particle"
+#endif
 
 void part_relink_gparts_to_parts(struct part *parts, size_t N,
                                  ptrdiff_t offset);
@@ -108,7 +123,6 @@ extern MPI_Datatype part_mpi_type;
 extern MPI_Datatype xpart_mpi_type;
 extern MPI_Datatype gpart_mpi_type;
 extern MPI_Datatype spart_mpi_type;
-extern MPI_Datatype multipole_mpi_type;
 
 void part_create_mpi_types(void);
 #endif
diff --git a/src/part_type.c b/src/part_type.c
index af97bd34aaace93a9faa953c0c9345d83ca3bc34..1f96d4ef1db4b35a92d133e91498ea10ce472c70 100644
--- a/src/part_type.c
+++ b/src/part_type.c
@@ -20,5 +20,5 @@
 /* This object's header. */
 #include "part_type.h"
 
-const char* part_type_names[swift_type_count] = {"Gas",   "DM",   "Dummy",
-                                                 "Dummy", "Star", "BH"};
+const char* part_type_names[swift_type_count] = {"Gas",   "DM",    "Dummy",
+                                                 "Dummy", "Stars", "BH"};
diff --git a/src/part_type.h b/src/part_type.h
index fbe2b2aeaea37503635372b0f09f8edde4578721..901f47193fa0e72b362c8dce5199a1d0a20526c9 100644
--- a/src/part_type.h
+++ b/src/part_type.h
@@ -27,7 +27,7 @@
 enum part_type {
   swift_type_gas = 0,
   swift_type_dark_matter = 1,
-  swift_type_star = 4,
+  swift_type_stars = 4,
   swift_type_black_hole = 5,
   swift_type_count
 } __attribute__((packed));
diff --git a/src/partition.c b/src/partition.c
index 98e3e7b670ddd9d849834f1e7f86fa94c2cd335f..606f64e4c2f1c057520ed7ff893db10905f5aa8e 100644
--- a/src/partition.c
+++ b/src/partition.c
@@ -24,7 +24,7 @@
  *  a grid of cells into geometrically connected regions and distributing
  *  these around a number of MPI nodes.
  *
- *  Currently supported partitioning types: grid, vectorise and METIS.
+ *  Currently supported partitioning types: grid, vectorise and METIS/ParMETIS.
  */
 
 /* Config parameters. */
@@ -37,10 +37,17 @@
 #include <stdlib.h>
 #include <strings.h>
 
+/* Include int min and max values. Define these limits in C++ as well. */
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
 /* MPI headers. */
 #ifdef WITH_MPI
 #include <mpi.h>
-/* METIS headers only used when MPI is also available. */
+/* METIS/ParMETIS headers only used when MPI is also available. */
+#ifdef HAVE_PARMETIS
+#include <parmetis.h>
+#endif
 #ifdef HAVE_METIS
 #include <metis.h>
 #endif
@@ -55,29 +62,32 @@
 #include "space.h"
 #include "tools.h"
 
-/* Maximum weight used for METIS. */
-#define metis_maxweight 10000.0f
-
 /* Simple descriptions of initial partition types for reports. */
 const char *initial_partition_name[] = {
-    "gridded cells", "vectorized point associated cells",
-    "METIS particle weighted cells", "METIS unweighted cells"};
+    "axis aligned grids of cells", "vectorized point associated cells",
+    "memory balanced, using particle weighted cells",
+    "similar sized regions, using unweighted cells"};
 
 /* Simple descriptions of repartition types for reports. */
 const char *repartition_name[] = {
-    "no",
-    "METIS edge and vertex task cost weights",
-    "METIS particle count vertex weights",
-    "METIS task cost edge weights",
-    "METIS particle count vertex and task cost edge weights",
-    "METIS vertex task costs and edge delta timebin weights",
-    "METIS particle count vertex and edge delta timebin weights",
-    "METIS edge delta timebin weights",
-};
+    "none", "edge and vertex task cost weights", "task cost edge weights",
+    "memory balanced, using particle vertex weights",
+    "vertex task costs and edge delta timebin weights"};
 
 /* Local functions, if needed. */
 static int check_complete(struct space *s, int verbose, int nregions);
 
+/*
+ * Repartition fixed costs per type/subtype. These are determined from the
+ * statistics output produced when running with task debugging enabled.
+ */
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+static double repartition_costs[task_type_count][task_subtype_count];
+#endif
+#if defined(WITH_MPI)
+static int repart_init_fixed_costs(void);
+#endif
+
 /*  Vectorisation support */
 /*  ===================== */
 
@@ -156,34 +166,39 @@ static void split_vector(struct space *s, int nregions, int *samplecells) {
 }
 #endif
 
-  /* METIS support
-   * =============
+  /* METIS/ParMETIS support (optional)
+   * =================================
    *
-   * METIS partitions using a multi-level k-way scheme. We support using this in
-   * a unweighted scheme, which works well and seems to be guaranteed, and a
-   * weighted by the number of particles scheme. Note METIS is optional.
+   * METIS/ParMETIS partitions using a multi-level k-way scheme. We support
+   * using this in a unweighted scheme, which works well and seems to be
+   * guaranteed, and a weighted by the number of particles scheme.
    *
-   * Repartitioning is based on METIS and uses weights determined from the times
-   * that cell tasks have taken. These weight the graph edges and vertices, or
-   * just the edges, with vertex weights from the particle counts or none.
+   * Repartitioning is based on ParMETIS and uses weights determined from the
+   * estimated costs that a cells tasks will take or the relative time bins of
+   * the cells next updates.
    */
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 /**
- * @brief Fill the METIS xadj and adjncy arrays defining the graph of cells
- *        in a space.
+ * @brief Fill the adjncy array defining the graph of cells in a space.
+ *
+ * See the ParMETIS and METIS manuals if you want to understand this
+ * format. The cell graph consists of all nodes as vertices with edges as the
+ * connections to all neighbours, so we have 26 per vertex. Note you will
+ * also need an xadj array, for METIS that would be:
  *
- * See the METIS manual if you want to understand this format. The cell graph
- * consists of all nodes as vertices with edges as the connections to all
- * neighbours, so we have 26 per vertex.
+ *   xadj[0] = 0;
+ *   for (int k = 0; k < s->nr_cells; k++) xadj[k + 1] = xadj[k] + 26;
+ *
+ * but each rank needs a different xadj when using ParMETIS.
  *
  * @param s the space of cells.
- * @param adjncy the METIS adjncy array to fill, must be of size
- *               26 * the number of cells in the space.
+ * @param adjncy the adjncy array to fill, must be of size 26 * the number of
+ *               cells in the space.
  * @param xadj the METIS xadj array to fill, must be of size
  *             number of cells in space + 1. NULL for not used.
  */
-static void graph_init_metis(struct space *s, idx_t *adjncy, idx_t *xadj) {
+static void graph_init(struct space *s, idx_t *adjncy, idx_t *xadj) {
 
   /* Loop over all cells in the space. */
   int cid = 0;
@@ -227,7 +242,7 @@ static void graph_init_metis(struct space *s, idx_t *adjncy, idx_t *xadj) {
     }
   }
 
-  /* If given set xadj. */
+  /* If given set METIS xadj. */
   if (xadj != NULL) {
     xadj[0] = 0;
     for (int k = 0; k < s->nr_cells; k++) xadj[k + 1] = xadj[k] + 26;
@@ -235,44 +250,135 @@ static void graph_init_metis(struct space *s, idx_t *adjncy, idx_t *xadj) {
 }
 #endif
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+struct counts_mapper_data {
+  double *counts;
+  size_t size;
+  struct space *s;
+};
+
+/* Generic function for accumulating sized counts for TYPE parts. Note uses
+ * local memory to reduce contention, the amount of memory required is
+ * precalculated by an additional loop determining the range of cell IDs. */
+#define ACCUMULATE_SIZES_MAPPER(TYPE)                                          \
+  accumulate_sizes_mapper_##TYPE(void *map_data, int num_elements,             \
+                                 void *extra_data) {                           \
+    struct TYPE *parts = (struct TYPE *)map_data;                              \
+    struct counts_mapper_data *mydata =                                        \
+        (struct counts_mapper_data *)extra_data;                               \
+    double size = mydata->size;                                                \
+    int *cdim = mydata->s->cdim;                                               \
+    double iwidth[3] = {mydata->s->iwidth[0], mydata->s->iwidth[1],            \
+                        mydata->s->iwidth[2]};                                 \
+    double dim[3] = {mydata->s->dim[0], mydata->s->dim[1], mydata->s->dim[2]}; \
+    double *lcounts = NULL;                                                    \
+    int lcid = mydata->s->nr_cells;                                            \
+    int ucid = 0;                                                              \
+    for (int k = 0; k < num_elements; k++) {                                   \
+      for (int j = 0; j < 3; j++) {                                            \
+        if (parts[k].x[j] < 0.0)                                               \
+          parts[k].x[j] += dim[j];                                             \
+        else if (parts[k].x[j] >= dim[j])                                      \
+          parts[k].x[j] -= dim[j];                                             \
+      }                                                                        \
+      const int cid =                                                          \
+          cell_getid(cdim, parts[k].x[0] * iwidth[0],                          \
+                     parts[k].x[1] * iwidth[1], parts[k].x[2] * iwidth[2]);    \
+      if (cid > ucid) ucid = cid;                                              \
+      if (cid < lcid) lcid = cid;                                              \
+    }                                                                          \
+    int nused = ucid - lcid + 1;                                               \
+    if ((lcounts = (double *)calloc(sizeof(double), nused)) == NULL)           \
+      error("Failed to allocate counts thread-specific buffer");               \
+    for (int k = 0; k < num_elements; k++) {                                   \
+      const int cid =                                                          \
+          cell_getid(cdim, parts[k].x[0] * iwidth[0],                          \
+                     parts[k].x[1] * iwidth[1], parts[k].x[2] * iwidth[2]);    \
+      lcounts[cid - lcid] += size;                                             \
+    }                                                                          \
+    for (int k = 0; k < nused; k++)                                            \
+      atomic_add_d(&mydata->counts[k + lcid], lcounts[k]);                     \
+    free(lcounts);                                                             \
+  }
+
 /**
- * @brief Accumulate the counts of particles per cell.
+ * @brief Accumulate the sized counts of particles per cell.
+ * Threadpool helper for accumulating the counts of particles per cell.
  *
- * @param s the space containing the cells.
- * @param counts the number of particles per cell. Should be
- *               allocated as size s->nr_parts.
+ * part version.
+ */
+static void ACCUMULATE_SIZES_MAPPER(part);
+
+/**
+ * @brief Accumulate the sized counts of particles per cell.
+ * Threadpool helper for accumulating the counts of particles per cell.
+ *
+ * gpart version.
+ */
+static void ACCUMULATE_SIZES_MAPPER(gpart);
+
+/**
+ * @brief Accumulate the sized counts of particles per cell.
+ * Threadpool helper for accumulating the counts of particles per cell.
+ *
+ * spart version.
  */
-static void accumulate_counts(struct space *s, double *counts) {
+static void ACCUMULATE_SIZES_MAPPER(spart);
 
-  struct part *parts = s->parts;
-  int *cdim = s->cdim;
-  double iwidth[3] = {s->iwidth[0], s->iwidth[1], s->iwidth[2]};
-  double dim[3] = {s->dim[0], s->dim[1], s->dim[2]};
+/**
+ * @brief Accumulate total memory size in particles per cell.
+ *
+ * @param s the space containing the cells.
+ * @param counts the number of bytes in particles per cell. Should be
+ *               allocated as size s->nr_cells.
+ */
+static void accumulate_sizes(struct space *s, double *counts) {
 
   bzero(counts, sizeof(double) * s->nr_cells);
 
-  for (size_t k = 0; k < s->nr_parts; k++) {
-    for (int j = 0; j < 3; j++) {
-      if (parts[k].x[j] < 0.0)
-        parts[k].x[j] += dim[j];
-      else if (parts[k].x[j] >= dim[j])
-        parts[k].x[j] -= dim[j];
-    }
-    const int cid =
-        cell_getid(cdim, parts[k].x[0] * iwidth[0], parts[k].x[1] * iwidth[1],
-                   parts[k].x[2] * iwidth[2]);
-    counts[cid]++;
+  struct counts_mapper_data mapper_data;
+  mapper_data.counts = counts;
+  mapper_data.s = s;
+
+  double hsize = (double)sizeof(struct part);
+  if (s->nr_parts > 0) {
+    mapper_data.size = hsize;
+    threadpool_map(&s->e->threadpool, accumulate_sizes_mapper_part, s->parts,
+                   s->nr_parts, sizeof(struct part), space_splitsize,
+                   &mapper_data);
+  }
+
+  double gsize = (double)sizeof(struct gpart);
+  if (s->nr_gparts > 0) {
+    mapper_data.size = gsize;
+    threadpool_map(&s->e->threadpool, accumulate_sizes_mapper_gpart, s->gparts,
+                   s->nr_gparts, sizeof(struct gpart), space_splitsize,
+                   &mapper_data);
+  }
+
+  double ssize = (double)sizeof(struct spart);
+  if (s->nr_sparts > 0) {
+    mapper_data.size = ssize;
+    threadpool_map(&s->e->threadpool, accumulate_sizes_mapper_spart, s->sparts,
+                   s->nr_sparts, sizeof(struct spart), space_splitsize,
+                   &mapper_data);
+  }
+
+  /* Keep the sum of particles across all ranks in the range of IDX_MAX. */
+  if ((s->e->total_nr_parts * hsize + s->e->total_nr_gparts * gsize +
+       s->e->total_nr_sparts * ssize) > (double)IDX_MAX) {
+    double vscale =
+        (double)(IDX_MAX - 1000) /
+        (double)(s->e->total_nr_parts * hsize + s->e->total_nr_gparts * gsize +
+                 s->e->total_nr_sparts * ssize);
+    for (int k = 0; k < s->nr_cells; k++) counts[k] *= vscale;
   }
 }
 #endif
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 /**
- * @brief Apply METIS cell list partitioning to a cell structure.
- *
- * Uses the results of part_metis_pick to assign each cell's nodeID to the
- * picked region index, thus partitioning the space into regions.
+ * @brief Apply METIS cell-list partitioning to a cell structure.
  *
  * @param s the space containing the cells to split into regions.
  * @param nregions number of regions.
@@ -283,16 +389,18 @@ static void split_metis(struct space *s, int nregions, int *celllist) {
   for (int i = 0; i < s->nr_cells; i++) s->cells_top[i].nodeID = celllist[i];
 
   /* To check or visualise the partition dump all the cells. */
-  /* dumpCellRanks("metis_partition", s->cells_top, s->nr_cells); */
+  /*dumpCellRanks("metis_partition", s->cells_top, s->nr_cells);*/
 }
 #endif
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 
 /* qsort support. */
 struct indexval {
   int index;
   int count;
+  int old_val;
+  int new_val;
 };
 static int indexvalcmp(const void *p1, const void *p2) {
   const struct indexval *iv1 = (const struct indexval *)p1;
@@ -301,12 +409,107 @@ static int indexvalcmp(const void *p1, const void *p2) {
 }
 
 /**
- * @brief Partition the given space into a number of connected regions.
+ * @brief Check if there is a permutation of the region indices of our cells
+ *        that will reduce the amount of particle movement and return it.
  *
- * Split the space using METIS to derive a partitions using the
+ * @param newlist the new list of regions for our cells.
+ * @param oldlist the old list of regions for our cells.
+ * @param nregions the number of regions.
+ * @param ncells the number of cells.
+ * @param permlist the permutation of the newlist.
+ */
+void permute_regions(int *newlist, int *oldlist, int nregions, int ncells,
+                     int *permlist) {
+
+  /* We want a solution in which the current region assignments of the cells
+   * are preserved when possible, to avoid unneccesary particle movement.  So
+   * create a 2d-array of counts of cells that are common to all pairs of old
+   * and new lists. Each element of the array has a count of cells and an
+   * unique index so we can sort into decreasing counts.
+   */
+  int indmax = nregions * nregions;
+  struct indexval *ivs = NULL;
+  if ((ivs = (struct indexval *)malloc(sizeof(struct indexval) * indmax)) ==
+      NULL)
+    error("Failed to allocate ivs structs");
+  bzero(ivs, sizeof(struct indexval) * indmax);
+
+  for (int k = 0; k < ncells; k++) {
+    int index = newlist[k] + nregions * oldlist[k];
+    ivs[index].count++;
+    ivs[index].index = index;
+    ivs[index].old_val = oldlist[k];
+    ivs[index].new_val = newlist[k];
+  }
+  qsort(ivs, indmax, sizeof(struct indexval), indexvalcmp);
+
+  /* Go through the ivs using the largest counts first, these are the
+   * regions with the most cells in common, old partition to new. If not
+   * returning the permutation, avoid the associated work. */
+  int *oldmap = NULL;
+  int *newmap = NULL;
+  oldmap = permlist; /* Reuse this */
+  if ((newmap = (int *)malloc(sizeof(int) * nregions)) == NULL)
+    error("Failed to allocate newmap array");
+
+  for (int k = 0; k < nregions; k++) {
+    oldmap[k] = -1;
+    newmap[k] = -1;
+  }
+
+  for (int k = 0; k < indmax; k++) {
+
+    /* Stop when all regions with common cells have been considered. */
+    if (ivs[k].count == 0) break;
+
+    /* Store old and new IDs, if not already used. */
+    if (newmap[ivs[k].new_val] == -1 && oldmap[ivs[k].old_val] == -1) {
+      newmap[ivs[k].new_val] = ivs[k].old_val;
+      oldmap[ivs[k].old_val] = ivs[k].new_val;
+    }
+  }
+
+  /* Handle any regions that did not get selected by picking an unused rank
+   * from oldmap and assigning to newmap. */
+  int spare = 0;
+  for (int k = 0; k < nregions; k++) {
+    if (newmap[k] == -1) {
+      for (int j = spare; j < nregions; j++) {
+        if (oldmap[j] == -1) {
+          newmap[k] = j;
+          oldmap[j] = j;
+          spare = j;
+          break;
+        }
+      }
+    }
+  }
+
+  /* Permute the newlist into this order. */
+  for (int k = 0; k < ncells; k++) {
+    permlist[k] = newmap[newlist[k]];
+  }
+  free(newmap);
+  free(ivs);
+}
+#endif
+
+#if defined(WITH_MPI) && defined(HAVE_PARMETIS)
+/**
+ * @brief Partition the given space into a number of connected regions using
+ *        ParMETIS.
+ *
+ * Split the space using PARMETIS to derive a partition using the
  * given edge and vertex weights. If no weights are given then an
- * unweighted partition is performed.
+ * unweighted partition is performed. If refine is set then an existing
+ * partition is assumed to be present from the last call to this routine
+ * in the celllist argument, that will get a refined partition, not a new
+ * one.
  *
+ * Assumes MPI is up and running and the number of ranks is the same as the
+ * number of regions.
+ *
+ * @param nodeID our nodeID.
  * @param s the space of cells to partition.
  * @param nregions the number of regions required in the partition.
  * @param vertexw weights for the cells, sizeof number of cells if used,
@@ -315,256 +518,714 @@ static int indexvalcmp(const void *p1, const void *p2) {
  *        of cells * 26 if used, NULL for unit weights. Need to be packed
  *        in CSR format, so same as adjncy array. Need to be in the range of
  *        idx_t.
+ * @param refine whether to refine an existing partition, or create a new one.
+ * @param adaptive whether to use an adaptive repartition of an existing
+ *        partition or simple refinement. Adaptive repartition is controlled
+ *        by the itr parameter.
+ * @param itr the ratio of inter-process communication time to data
+ *            redistribution time. Used to weight repartitioning edge cuts
+ *            when refine and adaptive are true.
  * @param celllist on exit this contains the ids of the selected regions,
- *        sizeof number of cells.
+ *        size of number of cells. If refine is 1, then this should contain
+ *        the old partition on entry.
  */
-static void pick_metis(struct space *s, int nregions, double *vertexw,
-                       double *edgew, int *celllist) {
+static void pick_parmetis(int nodeID, struct space *s, int nregions,
+                          double *vertexw, double *edgew, int refine,
+                          int adaptive, float itr, int *celllist) {
+
+  int res;
+  MPI_Comm comm;
+  MPI_Comm_dup(MPI_COMM_WORLD, &comm);
 
   /* Total number of cells. */
   int ncells = s->cdim[0] * s->cdim[1] * s->cdim[2];
 
-  /* Nothing much to do if only using a single partition. Also avoids METIS
-   * bug that doesn't handle this case well. */
+  /* Nothing much to do if only using a single MPI rank. */
   if (nregions == 1) {
     for (int i = 0; i < ncells; i++) celllist[i] = 0;
     return;
   }
 
-  /* Allocate weights and adjacency arrays . */
-  idx_t *xadj;
-  if ((xadj = (idx_t *)malloc(sizeof(idx_t) * (ncells + 1))) == NULL)
+  /* We all get one of these with the same content. It defines the ranges of
+   * vertices that are found on each rank. This contiguity constraint seems to
+   * stop efficient local processing, since our cell distributions do not
+   * meet this requirement. That means the graph and related information needs
+   * to be all brought to one node and redistributed for processing in
+   * appropriate batches. */
+  idx_t *vtxdist;
+  if ((vtxdist = (idx_t *)malloc(sizeof(idx_t) * (nregions + 1))) == NULL)
+    error("Failed to allocate vtxdist buffer.");
+
+  if (nodeID == 0) {
+
+    /* Construct vtxdist and send it to all ranks. Each rank gets an equal
+     * number of vertices. */
+    vtxdist[0] = 0;
+    int k = ncells;
+    for (int i = 0; i < nregions; i++) {
+      int l = k / (nregions - i);
+      vtxdist[i + 1] = vtxdist[i] + l;
+      k -= l;
+    }
+    res = MPI_Bcast((void *)vtxdist, nregions + 1, IDX_T, 0, comm);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to broadcast vtxdist.");
+
+  } else {
+    res = MPI_Bcast((void *)vtxdist, nregions + 1, IDX_T, 0, comm);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to broadcast vtxdist.");
+  }
+
+  /* Number of cells on this node and space for the expected arrays. */
+  int nverts = vtxdist[nodeID + 1] - vtxdist[nodeID];
+
+  idx_t *xadj = NULL;
+  if ((xadj = (idx_t *)malloc(sizeof(idx_t) * (nverts + 1))) == NULL)
     error("Failed to allocate xadj buffer.");
-  idx_t *adjncy;
-  if ((adjncy = (idx_t *)malloc(sizeof(idx_t) * 26 * ncells)) == NULL)
+
+  idx_t *adjncy = NULL;
+  if ((adjncy = (idx_t *)malloc(sizeof(idx_t) * 26 * nverts)) == NULL)
     error("Failed to allocate adjncy array.");
+
   idx_t *weights_v = NULL;
   if (vertexw != NULL)
-    if ((weights_v = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+    if ((weights_v = (idx_t *)malloc(sizeof(idx_t) * nverts)) == NULL)
       error("Failed to allocate vertex weights array");
+
   idx_t *weights_e = NULL;
   if (edgew != NULL)
-    if ((weights_e = (idx_t *)malloc(26 * sizeof(idx_t) * ncells)) == NULL)
+    if ((weights_e = (idx_t *)malloc(26 * sizeof(idx_t) * nverts)) == NULL)
       error("Failed to allocate edge weights array");
-  idx_t *regionid;
-  if ((regionid = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+
+  idx_t *regionid = NULL;
+  if ((regionid = (idx_t *)malloc(sizeof(idx_t) * (nverts + 1))) == NULL)
     error("Failed to allocate regionid array");
 
-  /* Define the cell graph. */
-  graph_init_metis(s, adjncy, xadj);
+  /* Prepare MPI requests for the asynchronous communications */
+  MPI_Request *reqs;
+  if ((reqs = (MPI_Request *)malloc(sizeof(MPI_Request) * 5 * nregions)) ==
+      NULL)
+    error("Failed to allocate MPI request list.");
+  for (int k = 0; k < 5 * nregions; k++) reqs[k] = MPI_REQUEST_NULL;
 
-  /* Init the vertex weights array. */
-  if (vertexw != NULL) {
-    for (int k = 0; k < ncells; k++) {
-      if (vertexw[k] > 1) {
-        weights_v[k] = vertexw[k];
-      } else {
-        weights_v[k] = 1;
+  MPI_Status *stats;
+  if ((stats = (MPI_Status *)malloc(sizeof(MPI_Status) * 5 * nregions)) == NULL)
+    error("Failed to allocate MPI status list.");
+
+  /* Only use one rank to organize everything. */
+  if (nodeID == 0) {
+
+    /* Space for largest lists. */
+    idx_t *full_xadj = NULL;
+    if ((full_xadj =
+             (idx_t *)malloc(sizeof(idx_t) * (ncells + nregions + 1))) == NULL)
+      error("Failed to allocate xadj buffer.");
+    idx_t *full_adjncy = NULL;
+    if ((full_adjncy = (idx_t *)malloc(sizeof(idx_t) * 26 * ncells)) == NULL)
+      error("Failed to allocate adjncy array.");
+    idx_t *full_weights_v = NULL;
+    if (weights_v != NULL)
+      if ((full_weights_v = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+        error("Failed to allocate vertex weights array");
+    idx_t *full_weights_e = NULL;
+    if (weights_e != NULL)
+      if ((full_weights_e = (idx_t *)malloc(26 * sizeof(idx_t) * ncells)) ==
+          NULL)
+        error("Failed to allocate edge weights array");
+
+    idx_t *full_regionid = NULL;
+    if (refine) {
+      if ((full_regionid = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+        error("Failed to allocate regionid array");
+    }
+
+    /* Define the cell graph. */
+    graph_init(s, full_adjncy, NULL);
+
+    /* xadj is set for each rank, different to serial version in that each
+     * rank starts with 0 */
+    for (int rank = 0, j = 0; rank < nregions; rank++) {
+
+      /* Number of vertices for this rank. */
+      int nvt = vtxdist[rank + 1] - vtxdist[rank];
+
+      /* Start from 0, and step forward 26 edges each value. */
+      full_xadj[j] = 0;
+      for (int k = 0; k <= nvt; k++) {
+        full_xadj[j + 1] = full_xadj[j] + 26;
+        j++;
       }
     }
 
-#ifdef SWIFT_DEBUG_CHECKS
-    /* Check weights are all in range. */
-    int failed = 0;
-    for (int k = 0; k < ncells; k++) {
-      if ((idx_t)vertexw[k] < 0) {
-        message("Input vertex weight out of range: %ld", (long)vertexw[k]);
-        failed++;
+    /* Init the vertex weights array. */
+    if (vertexw != NULL) {
+      for (int k = 0; k < ncells; k++) {
+        if (vertexw[k] > 1) {
+          full_weights_v[k] = vertexw[k];
+        } else {
+          full_weights_v[k] = 1;
+        }
       }
-      if (weights_v[k] < 1) {
-        message("Used vertex weight  out of range: %" PRIDX, weights_v[k]);
-        failed++;
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check weights are all in range. */
+      int failed = 0;
+      for (int k = 0; k < ncells; k++) {
+        if ((idx_t)vertexw[k] < 0) {
+          message("Input vertex weight out of range: %ld", (long)vertexw[k]);
+          failed++;
+        }
+        if (full_weights_v[k] < 1) {
+          message("Used vertex weight  out of range: %" PRIDX,
+                  full_weights_v[k]);
+          failed++;
+        }
       }
+      if (failed > 0) error("%d vertex weights are out of range", failed);
+#endif
     }
-    if (failed > 0) error("%d vertex weights are out of range", failed);
+
+    /* Init the edges weights array. */
+    if (edgew != NULL) {
+      for (int k = 0; k < 26 * ncells; k++) {
+        if (edgew[k] > 1) {
+          full_weights_e[k] = edgew[k];
+        } else {
+          full_weights_e[k] = 1;
+        }
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check weights are all in range. */
+      int failed = 0;
+      for (int k = 0; k < 26 * ncells; k++) {
+
+        if ((idx_t)edgew[k] < 0) {
+          message("Input edge weight out of range: %ld", (long)edgew[k]);
+          failed++;
+        }
+        if (full_weights_e[k] < 1) {
+          message("Used edge weight out of range: %" PRIDX, full_weights_e[k]);
+          failed++;
+        }
+      }
+      if (failed > 0) error("%d edge weights are out of range", failed);
 #endif
-  }
+    }
+
+    /* Dump graphs to disk files for testing. ParMETIS xadj isn't right for
+     * a dump, so make a serial-like version. */
+    /*{
+      idx_t *tmp_xadj =
+          (idx_t *)malloc(sizeof(idx_t) * (ncells + nregions + 1));
+      tmp_xadj[0] = 0;
+      for (int k = 0; k < ncells; k++) tmp_xadj[k + 1] = tmp_xadj[k] + 26;
+      dumpMETISGraph("parmetis_graph", ncells, 1, tmp_xadj, full_adjncy,
+                     full_weights_v, NULL, full_weights_e);
+      free(tmp_xadj);
+      }*/
+
+    /* Send ranges to the other ranks and keep our own. */
+    for (int rank = 0, j1 = 0, j2 = 0, j3 = 0; rank < nregions; rank++) {
+      int nvt = vtxdist[rank + 1] - vtxdist[rank];
+
+      if (refine)
+        for (int i = 0; i < nvt; i++) full_regionid[j3 + i] = celllist[j3 + i];
+
+      if (rank == 0) {
+        memcpy(xadj, &full_xadj[j1], sizeof(idx_t) * (nvt + 1));
+        memcpy(adjncy, &full_adjncy[j2], sizeof(idx_t) * nvt * 26);
+        if (weights_e != NULL)
+          memcpy(weights_e, &full_weights_e[j2], sizeof(idx_t) * nvt * 26);
+        if (weights_v != NULL)
+          memcpy(weights_v, &full_weights_v[j3], sizeof(idx_t) * nvt);
+        if (refine) memcpy(regionid, full_regionid, sizeof(idx_t) * nvt);
 
-  /* Init the edges weights array. */
-  if (edgew != NULL) {
-    for (int k = 0; k < 26 * ncells; k++) {
-      if (edgew[k] > 1) {
-        weights_e[k] = edgew[k];
       } else {
-        weights_e[k] = 1;
+        res = MPI_Isend(&full_xadj[j1], nvt + 1, IDX_T, rank, 0, comm,
+                        &reqs[5 * rank + 0]);
+        if (res == MPI_SUCCESS)
+          res = MPI_Isend(&full_adjncy[j2], nvt * 26, IDX_T, rank, 1, comm,
+                          &reqs[5 * rank + 1]);
+        if (res == MPI_SUCCESS && weights_e != NULL)
+          res = MPI_Isend(&full_weights_e[j2], nvt * 26, IDX_T, rank, 2, comm,
+                          &reqs[5 * rank + 2]);
+        if (res == MPI_SUCCESS && weights_v != NULL)
+          res = MPI_Isend(&full_weights_v[j3], nvt, IDX_T, rank, 3, comm,
+                          &reqs[5 * rank + 3]);
+        if (refine && res == MPI_SUCCESS)
+          res = MPI_Isend(&full_regionid[j3], nvt, IDX_T, rank, 4, comm,
+                          &reqs[5 * rank + 4]);
+        if (res != MPI_SUCCESS) mpi_error(res, "Failed to send graph data");
       }
+      j1 += nvt + 1;
+      j2 += nvt * 26;
+      j3 += nvt;
     }
 
-#ifdef SWIFT_DEBUG_CHECKS
-    /* Check weights are all in range. */
-    int failed = 0;
-    for (int k = 0; k < 26 * ncells; k++) {
-
-      if ((idx_t)edgew[k] < 0) {
-        message("Input edge weight out of range: %ld", (long)edgew[k]);
-        failed++;
+    /* Wait for all sends to complete. */
+    int result;
+    if ((result = MPI_Waitall(5 * nregions, reqs, stats)) != MPI_SUCCESS) {
+      for (int k = 0; k < 5 * nregions; k++) {
+        char buff[MPI_MAX_ERROR_STRING];
+        MPI_Error_string(stats[k].MPI_ERROR, buff, &result);
+        message("send request from source %i, tag %i has error '%s'.",
+                stats[k].MPI_SOURCE, stats[k].MPI_TAG, buff);
       }
-      if (weights_e[k] < 1) {
-        message("Used edge weight out of range: %" PRIDX, weights_e[k]);
-        failed++;
+      error("Failed during waitall sending repartition data.");
+    }
+
+    /* Clean up. */
+    if (weights_v != NULL) free(full_weights_v);
+    if (weights_e != NULL) free(full_weights_e);
+    free(full_xadj);
+    free(full_adjncy);
+    if (refine) free(full_regionid);
+
+  } else {
+
+    /* Receive stuff from rank 0. */
+    res = MPI_Irecv(xadj, nverts + 1, IDX_T, 0, 0, comm, &reqs[0]);
+    if (res == MPI_SUCCESS)
+      res = MPI_Irecv(adjncy, nverts * 26, IDX_T, 0, 1, comm, &reqs[1]);
+    if (res == MPI_SUCCESS && weights_e != NULL)
+      res = MPI_Irecv(weights_e, nverts * 26, IDX_T, 0, 2, comm, &reqs[2]);
+    if (res == MPI_SUCCESS && weights_v != NULL)
+      res = MPI_Irecv(weights_v, nverts, IDX_T, 0, 3, comm, &reqs[3]);
+    if (refine && res == MPI_SUCCESS)
+      res += MPI_Irecv((void *)regionid, nverts, IDX_T, 0, 4, comm, &reqs[4]);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to receive graph data");
+
+    /* Wait for all recvs to complete. */
+    int result;
+    if ((result = MPI_Waitall(5, reqs, stats)) != MPI_SUCCESS) {
+      for (int k = 0; k < 5; k++) {
+        char buff[MPI_MAX_ERROR_STRING];
+        MPI_Error_string(stats[k].MPI_ERROR, buff, &result);
+        message("recv request from source %i, tag %i has error '%s'.",
+                stats[k].MPI_SOURCE, stats[k].MPI_TAG, buff);
       }
+      error("Failed during waitall receiving repartition data.");
     }
-    if (failed > 0) error("%d edge weights are out of range", failed);
-#endif
   }
 
-  /* Set the METIS options. */
-  idx_t options[METIS_NOPTIONS];
-  METIS_SetDefaultOptions(options);
-  options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;
-  options[METIS_OPTION_NUMBERING] = 0;
-  options[METIS_OPTION_CONTIG] = 1;
-  options[METIS_OPTION_NCUTS] = 10;
-  options[METIS_OPTION_NITER] = 20;
-
-  /* Call METIS. */
-  idx_t one = 1;
-  idx_t idx_ncells = ncells;
-  idx_t idx_nregions = nregions;
-  idx_t objval;
-
-  /* Dump graph in METIS format */
-  /*dumpMETISGraph("metis_graph", idx_ncells, one, xadj, adjncy,
-   *               weights_v, NULL, weights_e);
-   */
-  if (METIS_PartGraphKway(&idx_ncells, &one, xadj, adjncy, weights_v, NULL,
-                          weights_e, &idx_nregions, NULL, NULL, options,
-                          &objval, regionid) != METIS_OK)
-    error("Call to METIS_PartGraphKway failed.");
-
-  /* Check that the regionids are ok. */
-  for (int k = 0; k < ncells; k++)
-    if (regionid[k] < 0 || regionid[k] >= nregions)
-      error("Got bad nodeID %" PRIDX " for cell %i.", regionid[k], k);
-
-  /* We want a solution in which the current regions of the space are
-   * preserved when possible, to avoid unneccesary particle movement.
-   * So create a 2d-array of cells counts that are common to all pairs
-   * of old and new ranks. Each element of the array has a cell count and
-   * an unique index so we can sort into decreasing counts. */
-  int indmax = nregions * nregions;
-  struct indexval *ivs =
-      (struct indexval *)malloc(sizeof(struct indexval) * indmax);
-  bzero(ivs, sizeof(struct indexval) * indmax);
-  for (int k = 0; k < ncells; k++) {
-    int index = regionid[k] + nregions * s->cells_top[k].nodeID;
-    ivs[index].count++;
-    ivs[index].index = index;
-  }
-  qsort(ivs, indmax, sizeof(struct indexval), indexvalcmp);
+  /* Set up the tpwgts array. This is just 1/nregions. */
+  real_t *tpwgts;
+  if ((tpwgts = (real_t *)malloc(sizeof(real_t) * nregions)) == NULL)
+    error("Failed to allocate tpwgts array");
+  for (int i = 0; i < nregions; i++) tpwgts[i] = 1.0 / (real_t)nregions;
+
+  /* Common parameters. */
+  idx_t options[4];
+  options[0] = 1;
+  options[1] = 0;
+
+  idx_t edgecut;
+  idx_t ncon = 1;
+  idx_t nparts = nregions;
+  idx_t numflag = 0;
+  idx_t wgtflag = 0;
+  if (edgew != NULL) wgtflag += 1;
+  if (vertexw != NULL) wgtflag += 2;
+
+  real_t ubvec[1];
+  ubvec[0] = 1.001;
+
+  if (refine) {
+    /* Refine an existing partition, uncouple as we do not have the cells
+     * present on their expected ranks. */
+    options[3] = PARMETIS_PSR_UNCOUPLED;
+
+    /* Seed for randoms. */
+    options[2] = clocks_random_seed();
+
+    /* Choice is whether to use an adaptive repartition or a simple
+     * refinement. */
+    if (adaptive) {
+
+      /* Balance between cuts and movement. */
+      real_t itr_real_t = itr;
+      if (ParMETIS_V3_AdaptiveRepart(
+              vtxdist, xadj, adjncy, weights_v, NULL, weights_e, &wgtflag,
+              &numflag, &ncon, &nparts, tpwgts, ubvec, &itr_real_t, options,
+              &edgecut, regionid, &comm) != METIS_OK)
+        error("Call to ParMETIS_V3_AdaptiveRepart failed.");
+    } else {
+      if (ParMETIS_V3_RefineKway(vtxdist, xadj, adjncy, weights_v, weights_e,
+                                 &wgtflag, &numflag, &ncon, &nparts, tpwgts,
+                                 ubvec, options, &edgecut, regionid,
+                                 &comm) != METIS_OK)
+        error("Call to ParMETIS_V3_RefineKway failed.");
+    }
+  } else {
 
-  /* Go through the ivs using the largest counts first, these are the
-   * regions with the most cells in common, old partition to new. */
-  int *oldmap = (int *)malloc(sizeof(int) * nregions);
-  int *newmap = (int *)malloc(sizeof(int) * nregions);
-  for (int k = 0; k < nregions; k++) {
-    oldmap[k] = -1;
-    newmap[k] = -1;
+    /* Create a new partition. Use a number of guesses as that is similar to
+     * the way that serial METIS works (serial METIS usually gives the best
+     * quality partitions). */
+    idx_t best_edgecut = 0;
+    idx_t *best_regionid = NULL;
+    if ((best_regionid = (idx_t *)malloc(sizeof(idx_t) * (nverts + 1))) == NULL)
+      error("Failed to allocate best_regionid array");
+
+    for (int i = 0; i < 10; i++) {
+      options[2] = clocks_random_seed();
+
+      if (ParMETIS_V3_PartKway(vtxdist, xadj, adjncy, weights_v, weights_e,
+                               &wgtflag, &numflag, &ncon, &nparts, tpwgts,
+                               ubvec, options, &edgecut, regionid,
+                               &comm) != METIS_OK)
+        error("Call to ParMETIS_V3_PartKway failed.");
+
+      if (i == 0 || (best_edgecut > edgecut)) {
+        best_edgecut = edgecut;
+        memcpy(best_regionid, regionid, sizeof(idx_t) * (nverts + 1));
+      }
+    }
+
+    /* Keep the best edgecut. */
+    memcpy(regionid, best_regionid, sizeof(idx_t) * (nverts + 1));
+    free(best_regionid);
   }
-  for (int k = 0; k < indmax; k++) {
 
-    /* Stop when all regions with common cells have been considered. */
-    if (ivs[k].count == 0) break;
+  /* Need to gather all the regionid arrays from the ranks. */
+  for (int k = 0; k < nregions; k++) reqs[k] = MPI_REQUEST_NULL;
 
-    /* Store old and new IDs, if not already used. */
-    int oldregion = ivs[k].index / nregions;
-    int newregion = ivs[k].index - oldregion * nregions;
-    if (newmap[newregion] == -1 && oldmap[oldregion] == -1) {
-      newmap[newregion] = oldregion;
-      oldmap[oldregion] = newregion;
+  if (nodeID != 0) {
+
+    /* Send our regions to node 0. */
+    res = MPI_Isend(regionid, vtxdist[nodeID + 1] - vtxdist[nodeID], IDX_T, 0,
+                    1, comm, &reqs[0]);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to send new regionids");
+
+    /* Wait for send to complete. */
+    int err;
+    if ((err = MPI_Wait(reqs, stats)) != MPI_SUCCESS) {
+      mpi_error(err, "Failed during wait sending regionids.");
     }
-  }
 
-  /* Handle any regions that did not get selected by picking an unused rank
-   * from oldmap and assigning to newmap. */
-  int spare = 0;
-  for (int k = 0; k < nregions; k++) {
-    if (newmap[k] == -1) {
-      for (int j = spare; j < nregions; j++) {
-        if (oldmap[j] == -1) {
-          newmap[k] = j;
-          oldmap[j] = j;
-          spare = j;
-          break;
-        }
+  } else {
+
+    /* Node 0 */
+    idx_t *remoteids = NULL;
+    if ((remoteids = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+      error("Failed to allocate remoteids buffer");
+
+    int nvt = vtxdist[1] - vtxdist[0];
+    memcpy(remoteids, regionid, sizeof(idx_t) * nvt);
+
+    /* Receive from other ranks. */
+    for (int rank = 1, j = nvt; rank < nregions; rank++) {
+      nvt = vtxdist[rank + 1] - vtxdist[rank];
+      res = MPI_Irecv((void *)&remoteids[j], nvt, IDX_T, rank, 1, comm,
+                      &reqs[rank]);
+      if (res != MPI_SUCCESS) mpi_error(res, "Failed to receive new regionids");
+      j += nvt;
+    }
+
+    int err;
+    if ((err = MPI_Waitall(nregions, reqs, stats)) != MPI_SUCCESS) {
+      for (int k = 0; k < 5; k++) {
+        char buff[MPI_MAX_ERROR_STRING];
+        MPI_Error_string(stats[k].MPI_ERROR, buff, &err);
+        message("recv request from source %i, tag %i has error '%s'.",
+                stats[k].MPI_SOURCE, stats[k].MPI_TAG, buff);
       }
+      error("Failed during waitall receiving regionid data.");
     }
-  }
 
-  /* Set the cell list to the region index. */
-  for (int k = 0; k < ncells; k++) {
-    celllist[k] = newmap[regionid[k]];
+    /* Copy: idx_t -> int. */
+    int *newcelllist = NULL;
+    if ((newcelllist = (int *)malloc(sizeof(int) * ncells)) == NULL)
+      error("Failed to allocate new celllist");
+    for (int k = 0; k < ncells; k++) newcelllist[k] = remoteids[k];
+    free(remoteids);
+
+    /* Check that the region ids are all good. */
+    int bad = 0;
+    for (int k = 0; k < ncells; k++) {
+      if (newcelllist[k] < 0 || newcelllist[k] >= nregions) {
+        message("Got bad nodeID %" PRIDX " for cell %i.", newcelllist[k], k);
+        bad++;
+      }
+    }
+    if (bad) error("Bad node IDs located");
+
+    /* Now check the similarity to the old partition and permute if necessary.
+     * Checks show that refinement can return a permutation of the partition,
+     * we need to check for that and correct it as necessary. */
+    int permute = 1;
+    if (!refine) {
+
+      /* No old partition was given, so we need to construct the existing
+       * partition from the cells, if one existed. */
+      int nsum = 0;
+      for (int i = 0; i < s->nr_cells; i++) {
+        celllist[i] = s->cells_top[i].nodeID;
+        nsum += celllist[i];
+      }
+
+      /* If no previous partition then all nodeIDs will be set to 0. */
+      if (nsum == 0) permute = 0;
+    }
+
+    if (permute) {
+      int *permcelllist = NULL;
+      if ((permcelllist = (int *)malloc(sizeof(int) * ncells)) == NULL)
+        error("Failed to allocate perm celllist array");
+      permute_regions(newcelllist, celllist, nregions, ncells, permcelllist);
+
+      /* And keep. */
+      memcpy(celllist, permcelllist, sizeof(int) * ncells);
+      free(permcelllist);
+
+    } else {
+      memcpy(celllist, newcelllist, sizeof(int) * ncells);
+    }
+    free(newcelllist);
   }
 
+  /* And everyone gets a copy. */
+  res = MPI_Bcast(celllist, s->nr_cells, MPI_INT, 0, MPI_COMM_WORLD);
+  if (res != MPI_SUCCESS) mpi_error(res, "Failed to broadcast new celllist");
+
   /* Clean up. */
+  free(reqs);
+  free(stats);
   if (weights_v != NULL) free(weights_v);
   if (weights_e != NULL) free(weights_e);
-  free(ivs);
-  free(oldmap);
-  free(newmap);
+  free(vtxdist);
+  free(tpwgts);
   free(xadj);
   free(adjncy);
   free(regionid);
 }
 #endif
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 /**
- * @brief Repartition the cells amongst the nodes using task costs
- *        as edge weights and vertex weights also from task costs
- *        or particle cells counts.
+ * @brief Partition the given space into a number of connected regions.
  *
- * @param partweights whether particle counts will be used as vertex weights.
- * @param bothweights whether vertex and edge weights will be used, otherwise
- *                    only edge weights will be used.
- * @param timebins use timebins as edge weights.
- * @param nodeID our nodeID.
- * @param nr_nodes the number of nodes.
- * @param s the space of cells holding our local particles.
- * @param tasks the completed tasks from the last engine step for our node.
- * @param nr_tasks the number of tasks.
+ * Split the space using METIS to derive a partition using the given edge and
+ * vertex weights. If no weights are given then an unweighted partition is
+ * performed.
+ *
+ * @param nodeID the rank of our node.
+ * @param s the space of cells to partition.
+ * @param nregions the number of regions required in the partition.
+ * @param vertexw weights for the cells, sizeof number of cells if used,
+ *        NULL for unit weights. Need to be in the range of idx_t.
+ * @param edgew weights for the graph edges between all cells, sizeof number
+ *        of cells * 26 if used, NULL for unit weights. Need to be packed
+ *        in CSR format, so same as adjncy array. Need to be in the range of
+ *        idx_t.
+ * @param celllist on exit this contains the ids of the selected regions,
+ *        sizeof number of cells.
  */
-static void repart_edge_metis(int partweights, int bothweights, int timebins,
-                              int nodeID, int nr_nodes, struct space *s,
-                              struct task *tasks, int nr_tasks) {
+static void pick_metis(int nodeID, struct space *s, int nregions,
+                       double *vertexw, double *edgew, int *celllist) {
 
-  /* Create weight arrays using task ticks for vertices and edges (edges
-   * assume the same graph structure as used in the part_ calls). */
-  int nr_cells = s->nr_cells;
-  struct cell *cells = s->cells_top;
+  /* Total number of cells. */
+  int ncells = s->cdim[0] * s->cdim[1] * s->cdim[2];
 
-  /* Allocate and fill the adjncy indexing array defining the graph of
-   * cells. */
-  idx_t *inds;
-  if ((inds = (idx_t *)malloc(sizeof(idx_t) * 26 * nr_cells)) == NULL)
-    error("Failed to allocate the inds array");
-  graph_init_metis(s, inds, NULL);
+  /* Nothing much to do if only using a single partition. Also avoids METIS
+   * bug that doesn't handle this case well. */
+  if (nregions == 1) {
+    for (int i = 0; i < ncells; i++) celllist[i] = 0;
+    return;
+  }
 
-  /* Allocate and init weights. */
-  double *weights_v = NULL;
-  double *weights_e = NULL;
-  if (bothweights) {
-    if ((weights_v = (double *)malloc(sizeof(double) * nr_cells)) == NULL)
-      error("Failed to allocate vertex weights arrays.");
-    bzero(weights_v, sizeof(double) * nr_cells);
+  /* Only one node needs to calculate this. */
+  if (nodeID == 0) {
+
+    /* Allocate weights and adjacency arrays. */
+    idx_t *xadj;
+    if ((xadj = (idx_t *)malloc(sizeof(idx_t) * (ncells + 1))) == NULL)
+      error("Failed to allocate xadj buffer.");
+    idx_t *adjncy;
+    if ((adjncy = (idx_t *)malloc(sizeof(idx_t) * 26 * ncells)) == NULL)
+      error("Failed to allocate adjncy array.");
+    idx_t *weights_v = NULL;
+    if (vertexw != NULL)
+      if ((weights_v = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+        error("Failed to allocate vertex weights array");
+    idx_t *weights_e = NULL;
+    if (edgew != NULL)
+      if ((weights_e = (idx_t *)malloc(26 * sizeof(idx_t) * ncells)) == NULL)
+        error("Failed to allocate edge weights array");
+    idx_t *regionid;
+    if ((regionid = (idx_t *)malloc(sizeof(idx_t) * ncells)) == NULL)
+      error("Failed to allocate regionid array");
+
+    /* Define the cell graph. */
+    graph_init(s, adjncy, xadj);
+
+    /* Init the vertex weights array. */
+    if (vertexw != NULL) {
+      for (int k = 0; k < ncells; k++) {
+        if (vertexw[k] > 1) {
+          weights_v[k] = vertexw[k];
+        } else {
+          weights_v[k] = 1;
+        }
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check weights are all in range. */
+      int failed = 0;
+      for (int k = 0; k < ncells; k++) {
+        if ((idx_t)vertexw[k] < 0) {
+          message("Input vertex weight out of range: %ld", (long)vertexw[k]);
+          failed++;
+        }
+        if (weights_v[k] < 1) {
+          message("Used vertex weight  out of range: %" PRIDX, weights_v[k]);
+          failed++;
+        }
+      }
+      if (failed > 0) error("%d vertex weights are out of range", failed);
+#endif
+    }
+
+    /* Init the edge weights array. */
+
+    if (edgew != NULL) {
+      for (int k = 0; k < 26 * ncells; k++) {
+        if (edgew[k] > 1) {
+          weights_e[k] = edgew[k];
+        } else {
+          weights_e[k] = 1;
+        }
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check weights are all in range. */
+      int failed = 0;
+      for (int k = 0; k < 26 * ncells; k++) {
+
+        if ((idx_t)edgew[k] < 0) {
+          message("Input edge weight out of range: %ld", (long)edgew[k]);
+          failed++;
+        }
+        if (weights_e[k] < 1) {
+          message("Used edge weight out of range: %" PRIDX, weights_e[k]);
+          failed++;
+        }
+      }
+      if (failed > 0) error("%d edge weights are out of range", failed);
+#endif
+    }
+
+    /* Set the METIS options. */
+    idx_t options[METIS_NOPTIONS];
+    METIS_SetDefaultOptions(options);
+    options[METIS_OPTION_OBJTYPE] = METIS_OBJTYPE_CUT;
+    options[METIS_OPTION_NUMBERING] = 0;
+    options[METIS_OPTION_CONTIG] = 1;
+    options[METIS_OPTION_NCUTS] = 10;
+    options[METIS_OPTION_NITER] = 20;
+
+    /* Call METIS. */
+    idx_t one = 1;
+    idx_t idx_ncells = ncells;
+    idx_t idx_nregions = nregions;
+    idx_t objval;
+
+    /* Dump graph in METIS format */
+    /*dumpMETISGraph("metis_graph", idx_ncells, one, xadj, adjncy, weights_v,
+      NULL, weights_e);*/
+
+    if (METIS_PartGraphKway(&idx_ncells, &one, xadj, adjncy, weights_v, NULL,
+                            weights_e, &idx_nregions, NULL, NULL, options,
+                            &objval, regionid) != METIS_OK)
+      error("Call to METIS_PartGraphKway failed.");
+
+    /* Check that the regionids are ok. */
+    for (int k = 0; k < ncells; k++) {
+      if (regionid[k] < 0 || regionid[k] >= nregions)
+        error("Got bad nodeID %" PRIDX " for cell %i.", regionid[k], k);
+
+      /* And keep. */
+      celllist[k] = regionid[k];
+    }
+
+    /* Clean up. */
+    if (weights_v != NULL) free(weights_v);
+    if (weights_e != NULL) free(weights_e);
+    free(xadj);
+    free(adjncy);
+    free(regionid);
   }
-  if ((weights_e = (double *)malloc(sizeof(double) * 26 * nr_cells)) == NULL)
-    error("Failed to allocate edge weights arrays.");
-  bzero(weights_e, sizeof(double) * 26 * nr_cells);
 
-  /* Generate task weights for vertices. */
-  int taskvweights = (bothweights && !partweights);
+  /* Calculations all done, now everyone gets a copy. */
+  int res = MPI_Bcast(celllist, ncells, MPI_INT, 0, MPI_COMM_WORLD);
+  if (res != MPI_SUCCESS) mpi_error(res, "Failed to broadcast new celllist");
+}
+#endif
 
-  /* Loop over the tasks... */
-  for (int j = 0; j < nr_tasks; j++) {
-    /* Get a pointer to the kth task. */
-    struct task *t = &tasks[j];
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+
+/* Helper struct for partition_gather weights. */
+struct weights_mapper_data {
+  double *weights_e;
+  double *weights_v;
+  idx_t *inds;
+  int eweights;
+  int nodeID;
+  int timebins;
+  int vweights;
+  int nr_cells;
+  int use_ticks;
+  struct cell *cells;
+};
+
+#ifdef SWIFT_DEBUG_CHECKS
+static void check_weights(struct task *tasks, int nr_tasks,
+                          struct weights_mapper_data *weights_data,
+                          double *weights_v, double *weights_e);
+#endif
+
+/**
+ * @brief Threadpool mapper function to gather cell edge and vertex weights
+ *        from the associated tasks.
+ *
+ * @param map_data part of the data to process in this mapper.
+ * @param num_elements the number of data elements to process.
+ * @param extra_data additional data for the mapper context.
+ */
+static void partition_gather_weights(void *map_data, int num_elements,
+                                     void *extra_data) {
+
+  struct task *tasks = (struct task *)map_data;
+  struct weights_mapper_data *mydata = (struct weights_mapper_data *)extra_data;
+
+  double *weights_e = mydata->weights_e;
+  double *weights_v = mydata->weights_v;
+  idx_t *inds = mydata->inds;
+  int eweights = mydata->eweights;
+  int nodeID = mydata->nodeID;
+  int nr_cells = mydata->nr_cells;
+  int timebins = mydata->timebins;
+  int vweights = mydata->vweights;
+  int use_ticks = mydata->use_ticks;
+
+  struct cell *cells = mydata->cells;
+
+  /* Loop over the tasks... */
+  for (int i = 0; i < num_elements; i++) {
+    struct task *t = &tasks[i];
 
     /* Skip un-interesting tasks. */
-    if (t->cost == 0.f) continue;
-
-    /* Get the task weight based on costs. */
-    double w = (double)t->cost;
+    if (t->type == task_type_send || t->type == task_type_recv ||
+        t->type == task_type_logger || t->implicit || t->ci == NULL)
+      continue;
+
+    /* Get weight for this task. Either based on fixed costs or task timings. */
+    double w = 0.0;
+    if (use_ticks) {
+      w = (double)t->toc - (double)t->tic;
+    } else {
+      w = repartition_costs[t->type][t->subtype];
+    }
+    if (w <= 0.0) continue;
 
     /* Get the top-level cells involved. */
     struct cell *ci, *cj;
@@ -583,13 +1244,14 @@ static void repart_edge_metis(int partweights, int bothweights, int timebins,
     if (t->type == task_type_drift_part || t->type == task_type_drift_gpart ||
         t->type == task_type_ghost || t->type == task_type_extra_ghost ||
         t->type == task_type_kick1 || t->type == task_type_kick2 ||
-        t->type == task_type_end_force || t->type == task_type_cooling ||
-        t->type == task_type_timestep || t->type == task_type_init_grav ||
-        t->type == task_type_grav_down ||
+        t->type == task_type_end_hydro_force ||
+        t->type == task_type_end_grav_force || t->type == task_type_cooling ||
+        t->type == task_type_star_formation || t->type == task_type_timestep ||
+        t->type == task_type_init_grav || t->type == task_type_grav_down ||
         t->type == task_type_grav_long_range) {
 
       /* Particle updates add only to vertex weight. */
-      if (taskvweights) weights_v[cid] += w;
+      if (vweights) atomic_add_d(&weights_v[cid], w);
     }
 
     /* Self interaction? */
@@ -597,7 +1259,7 @@ static void repart_edge_metis(int partweights, int bothweights, int timebins,
              (t->type == task_type_sub_self && cj == NULL &&
               ci->nodeID == nodeID)) {
       /* Self interactions add only to vertex weight. */
-      if (taskvweights) weights_v[cid] += w;
+      if (vweights) atomic_add_d(&weights_v[cid], w);
 
     }
 
@@ -606,7 +1268,7 @@ static void repart_edge_metis(int partweights, int bothweights, int timebins,
       /* In-cell pair? */
       if (ci == cj) {
         /* Add weight to vertex for ci. */
-        if (taskvweights) weights_v[cid] += w;
+        if (vweights) atomic_add_d(&weights_v[cid], w);
 
       }
 
@@ -616,238 +1278,377 @@ static void repart_edge_metis(int partweights, int bothweights, int timebins,
         int cjd = cj - cells;
 
         /* Local cells add weight to vertices. */
-        if (taskvweights && ci->nodeID == nodeID) {
-          weights_v[cid] += 0.5 * w;
-          if (cj->nodeID == nodeID) weights_v[cjd] += 0.5 * w;
+        if (vweights && ci->nodeID == nodeID) {
+          atomic_add_d(&weights_v[cid], 0.5 * w);
+          if (cj->nodeID == nodeID) atomic_add_d(&weights_v[cjd], 0.5 * w);
         }
 
-        /* Find indices of ci/cj neighbours. Note with gravity these cells may
-         * not be neighbours, in that case we ignore any edge weight for that
-         * pair. */
-        int ik = -1;
-        for (int k = 26 * cid; k < 26 * nr_cells; k++) {
-          if (inds[k] == cjd) {
-            ik = k;
-            break;
+        if (eweights) {
+
+          /* Find indices of ci/cj neighbours. Note with gravity these cells may
+           * not be neighbours, in that case we ignore any edge weight for that
+           * pair. */
+          int ik = -1;
+          for (int k = 26 * cid; k < 26 * nr_cells; k++) {
+            if (inds[k] == cjd) {
+              ik = k;
+              break;
+            }
           }
-        }
 
-        /* cj */
-        int jk = -1;
-        for (int k = 26 * cjd; k < 26 * nr_cells; k++) {
-          if (inds[k] == cid) {
-            jk = k;
-            break;
+          /* cj */
+          int jk = -1;
+          for (int k = 26 * cjd; k < 26 * nr_cells; k++) {
+            if (inds[k] == cid) {
+              jk = k;
+              break;
+            }
           }
-        }
-        if (ik != -1 && jk != -1) {
-
-          if (timebins) {
-            /* Add weights to edge for all cells based on the expected
-             * interaction time (calculated as the time to the last expected
-             * time) as we want to avoid having active cells on the edges, so
-             * we cut for that. Note that weight is added to the local and
-             * remote cells, as we want to keep both away from any cuts, this
-             * can overflow int, so take care. */
-            int dti = num_time_bins - get_time_bin(ci->ti_hydro_end_min);
-            int dtj = num_time_bins - get_time_bin(cj->ti_hydro_end_min);
-            double dt = (double)(1 << dti) + (double)(1 << dtj);
-            weights_e[ik] += dt;
-            weights_e[jk] += dt;
-
-          } else {
-
-            /* Add weights from task costs to the edge. */
-            weights_e[ik] += w;
-            weights_e[jk] += w;
+          if (ik != -1 && jk != -1) {
+
+            if (timebins) {
+              /* Add weights to edge for all cells based on the expected
+               * interaction time (calculated as the time to the last expected
+               * time) as we want to avoid having active cells on the edges, so
+               * we cut for that. Note that weight is added to the local and
+               * remote cells, as we want to keep both away from any cuts, this
+               * can overflow int, so take care. */
+              int dti = num_time_bins - get_time_bin(ci->hydro.ti_end_min);
+              int dtj = num_time_bins - get_time_bin(cj->hydro.ti_end_min);
+              double dt = (double)(1 << dti) + (double)(1 << dtj);
+              atomic_add_d(&weights_e[ik], dt);
+              atomic_add_d(&weights_e[jk], dt);
+
+            } else {
+
+              /* Add weights from task costs to the edge. */
+              atomic_add_d(&weights_e[ik], w);
+              atomic_add_d(&weights_e[jk], w);
+            }
           }
         }
       }
     }
   }
+}
+
+/**
+ * @brief Repartition the cells amongst the nodes using weights of
+ *        various kinds.
+ *
+ * @param vweights whether vertex weights will be used.
+ * @param eweights whether edge weights will be used.
+ * @param timebins use timebins as the edge weights.
+ * @param repartition the partition struct of the local engine.
+ * @param nodeID our nodeID.
+ * @param nr_nodes the number of nodes.
+ * @param s the space of cells holding our local particles.
+ * @param tasks the completed tasks from the last engine step for our node.
+ * @param nr_tasks the number of tasks.
+ */
+static void repart_edge_metis(int vweights, int eweights, int timebins,
+                              struct repartition *repartition, int nodeID,
+                              int nr_nodes, struct space *s, struct task *tasks,
+                              int nr_tasks) {
 
-  /* Re-calculate the vertices if using particle counts. */
-  if (partweights && bothweights) accumulate_counts(s, weights_v);
+  /* Create weight arrays using task ticks for vertices and edges (edges
+   * assume the same graph structure as used in the part_ calls). */
+  int nr_cells = s->nr_cells;
+  struct cell *cells = s->cells_top;
 
-  /* Merge the weights arrays across all nodes. */
-  int res;
-  if (bothweights) {
-    if ((res = MPI_Reduce((nodeID == 0) ? MPI_IN_PLACE : weights_v, weights_v,
-                          nr_cells, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD)) !=
-        MPI_SUCCESS)
-      mpi_error(res, "Failed to allreduce vertex weights.");
+  /* Allocate and fill the adjncy indexing array defining the graph of
+   * cells. */
+  idx_t *inds;
+  if ((inds = (idx_t *)malloc(sizeof(idx_t) * 26 * nr_cells)) == NULL)
+    error("Failed to allocate the inds array");
+  graph_init(s, inds, NULL);
+
+  /* Allocate and init weights. */
+  double *weights_v = NULL;
+  double *weights_e = NULL;
+  if (vweights) {
+    if ((weights_v = (double *)malloc(sizeof(double) * nr_cells)) == NULL)
+      error("Failed to allocate vertex weights arrays.");
+    bzero(weights_v, sizeof(double) * nr_cells);
+  }
+  if (eweights) {
+    if ((weights_e = (double *)malloc(sizeof(double) * 26 * nr_cells)) == NULL)
+      error("Failed to allocate edge weights arrays.");
+    bzero(weights_e, sizeof(double) * 26 * nr_cells);
   }
 
-  if ((res = MPI_Reduce((nodeID == 0) ? MPI_IN_PLACE : weights_e, weights_e,
-                        26 * nr_cells, MPI_DOUBLE, MPI_SUM, 0,
-                        MPI_COMM_WORLD)) != MPI_SUCCESS)
-    mpi_error(res, "Failed to allreduce edge weights.");
+  /* Gather weights. */
+  struct weights_mapper_data weights_data;
 
-  /* Allocate cell list for the partition. */
-  int *celllist = (int *)malloc(sizeof(int) * s->nr_cells);
-  if (celllist == NULL) error("Failed to allocate celllist");
+  weights_data.cells = cells;
+  weights_data.eweights = eweights;
+  weights_data.inds = inds;
+  weights_data.nodeID = nodeID;
+  weights_data.nr_cells = nr_cells;
+  weights_data.timebins = timebins;
+  weights_data.vweights = vweights;
+  weights_data.weights_e = weights_e;
+  weights_data.weights_v = weights_v;
+  weights_data.use_ticks = repartition->use_ticks;
 
-  /* As of here, only one node needs to compute the partition. */
-  if (nodeID == 0) {
+  ticks tic = getticks();
 
-    /* We need to rescale the weights into the range of an integer for METIS
-     * (really range of idx_t). Also we would like the range of vertex and
-     * edges weights to be similar so they balance. */
-    double wminv = 0.0;
-    double wmaxv = 0.0;
-    if (bothweights) {
-      wminv = weights_v[0];
-      wmaxv = weights_v[0];
-      for (int k = 0; k < nr_cells; k++) {
-        wmaxv = weights_v[k] > wmaxv ? weights_v[k] : wmaxv;
-        wminv = weights_v[k] < wminv ? weights_v[k] : wminv;
-      }
-    }
+  threadpool_map(&s->e->threadpool, partition_gather_weights, tasks, nr_tasks,
+                 sizeof(struct task), 0, &weights_data);
+  if (s->e->verbose)
+    message("weight mapper took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 
-    double wmine = weights_e[0];
-    double wmaxe = weights_e[0];
-    for (int k = 0; k < 26 * nr_cells; k++) {
-      wmaxe = weights_e[k] > wmaxe ? weights_e[k] : wmaxe;
-      wmine = weights_e[k] < wmine ? weights_e[k] : wmine;
-    }
+#ifdef SWIFT_DEBUG_CHECKS
+  check_weights(tasks, nr_tasks, &weights_data, weights_v, weights_e);
+#endif
 
-    if (bothweights) {
+  /* Merge the weights arrays across all nodes. */
+  int res;
+  if (vweights) {
+    res = MPI_Allreduce(MPI_IN_PLACE, weights_v, nr_cells, MPI_DOUBLE, MPI_SUM,
+                        MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS)
+      mpi_error(res, "Failed to allreduce vertex weights.");
+  }
 
-      /* Make range the same in both weights systems. */
-      if ((wmaxv - wminv) > (wmaxe - wmine)) {
-        double wscale = 1.0;
-        if ((wmaxe - wmine) > 0.0) {
-          wscale = (wmaxv - wminv) / (wmaxe - wmine);
-        }
-        for (int k = 0; k < 26 * nr_cells; k++) {
-          weights_e[k] = (weights_e[k] - wmine) * wscale + wminv;
-        }
-        wmine = wminv;
-        wmaxe = wmaxv;
+  if (eweights) {
+    res = MPI_Allreduce(MPI_IN_PLACE, weights_e, 26 * nr_cells, MPI_DOUBLE,
+                        MPI_SUM, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to allreduce edge weights.");
+  }
 
-      } else {
-        double wscale = 1.0;
-        if ((wmaxv - wminv) > 0.0) {
-          wscale = (wmaxe - wmine) / (wmaxv - wminv);
-        }
-        for (int k = 0; k < nr_cells; k++) {
-          weights_v[k] = (weights_v[k] - wminv) * wscale + wmine;
-        }
-        wminv = wmine;
-        wmaxv = wmaxe;
-      }
    /* Allocate the cell list for the partition, if not already done. */
+#ifdef HAVE_PARMETIS
+  int refine = 1;
+#endif
+  if (repartition->ncelllist != nr_cells) {
+#ifdef HAVE_PARMETIS
+    refine = 0;
+#endif
+    free(repartition->celllist);
+    repartition->ncelllist = 0;
+    if ((repartition->celllist = (int *)malloc(sizeof(int) * nr_cells)) == NULL)
+      error("Failed to allocate celllist");
+    repartition->ncelllist = nr_cells;
+  }
 
-      /* Scale to the METIS range. */
-      double wscale = 1.0;
-      if ((wmaxv - wminv) > 0.0) {
-        wscale = (metis_maxweight - 1.0) / (wmaxv - wminv);
+  /* We need to rescale the sum of the weights so that the sums of the two
+   * types of weights are less than IDX_MAX, that is the range of idx_t.  */
+  double vsum = 0.0;
+  if (vweights)
+    for (int k = 0; k < nr_cells; k++) vsum += weights_v[k];
+  double esum = 0.0;
+  if (eweights)
+    for (int k = 0; k < 26 * nr_cells; k++) esum += weights_e[k];
+
+  /* Do the scaling, if needed, keeping both weights in proportion. */
+  double vscale = 1.0;
+  double escale = 1.0;
+  if (vweights && eweights) {
+    if (vsum > esum) {
+      if (vsum > (double)IDX_MAX) {
+        vscale = (double)(IDX_MAX - 1000) / vsum;
+        escale = vscale;
       }
-      for (int k = 0; k < nr_cells; k++) {
-        weights_v[k] = (weights_v[k] - wminv) * wscale + 1.0;
+    } else {
+      if (esum > (double)IDX_MAX) {
+        escale = (double)(IDX_MAX - 1000) / esum;
+        vscale = escale;
       }
     }
+  } else if (vweights) {
+    if (vsum > (double)IDX_MAX) {
+      vscale = (double)(IDX_MAX - 1000) / vsum;
+    }
+  } else if (eweights) {
+    if (esum > (double)IDX_MAX) {
+      escale = (double)(IDX_MAX - 1000) / esum;
+    }
+  }
 
-    /* Scale to the METIS range. */
-    double wscale = 1.0;
-    if ((wmaxe - wmine) > 0.0) {
-      wscale = (metis_maxweight - 1.0) / (wmaxe - wmine);
+  if (vweights && vscale != 1.0) {
+    vsum = 0.0;
+    for (int k = 0; k < nr_cells; k++) {
+      weights_v[k] *= vscale;
+      vsum += weights_v[k];
     }
+    vscale = 1.0;
+  }
+  if (eweights && escale != 1.0) {
+    esum = 0.0;
     for (int k = 0; k < 26 * nr_cells; k++) {
-      weights_e[k] = (weights_e[k] - wmine) * wscale + 1.0;
+      weights_e[k] *= escale;
+      esum += weights_e[k];
     }
+    escale = 1.0;
+  }
 
-    /* And partition, use both weights or not as requested. */
-    if (bothweights)
-      pick_metis(s, nr_nodes, weights_v, weights_e, celllist);
-    else
-      pick_metis(s, nr_nodes, NULL, weights_e, celllist);
+  /* Balance edges and vertices when the edge weights are timebins; as these
+   * have no reason to share an equivalent scale, we use an equipartition. */
+  if (timebins && eweights) {
 
-    /* Check that all cells have good values. */
-    for (int k = 0; k < nr_cells; k++)
-      if (celllist[k] < 0 || celllist[k] >= nr_nodes)
-        error("Got bad nodeID %d for cell %i.", celllist[k], k);
-
-    /* Check that the partition is complete and all nodes have some work. */
-    int present[nr_nodes];
-    int failed = 0;
-    for (int i = 0; i < nr_nodes; i++) present[i] = 0;
-    for (int i = 0; i < nr_cells; i++) present[celllist[i]]++;
-    for (int i = 0; i < nr_nodes; i++) {
-      if (!present[i]) {
-        failed = 1;
-        message("Node %d is not present after repartition", i);
-      }
+    /* Make sums the same. */
+    if (vsum > esum) {
+      escale = vsum / esum;
+      for (int k = 0; k < 26 * nr_cells; k++) weights_e[k] *= escale;
+    } else {
+      vscale = esum / vsum;
+      for (int k = 0; k < nr_cells; k++) weights_v[k] *= vscale;
     }
+  }
 
-    /* If partition failed continue with the current one, but make this
-     * clear. */
-    if (failed) {
-      message(
-          "WARNING: METIS repartition has failed, continuing with "
-          "the current partition, load balance will not be optimal");
-      for (int k = 0; k < nr_cells; k++) celllist[k] = cells[k].nodeID;
+    /* And repartition/partition, using both weights or not as requested. */
+#ifdef HAVE_PARMETIS
+  if (repartition->usemetis) {
+    pick_metis(nodeID, s, nr_nodes, weights_v, weights_e,
+               repartition->celllist);
+  } else {
+    pick_parmetis(nodeID, s, nr_nodes, weights_v, weights_e, refine,
+                  repartition->adaptive, repartition->itr,
+                  repartition->celllist);
+  }
+#else
+  pick_metis(nodeID, s, nr_nodes, weights_v, weights_e, repartition->celllist);
+#endif
+
+  /* Check that all cells have good values. All nodes have same copy, so just
+   * check on one. */
+  if (nodeID == 0) {
+    for (int k = 0; k < nr_cells; k++)
+      if (repartition->celllist[k] < 0 || repartition->celllist[k] >= nr_nodes)
+        error("Got bad nodeID %d for cell %i.", repartition->celllist[k], k);
+  }
+
+  /* Check that the partition is complete and all nodes have some work. */
+  int present[nr_nodes];
+  int failed = 0;
+  for (int i = 0; i < nr_nodes; i++) present[i] = 0;
+  for (int i = 0; i < nr_cells; i++) present[repartition->celllist[i]]++;
+  for (int i = 0; i < nr_nodes; i++) {
+    if (!present[i]) {
+      failed = 1;
+      if (nodeID == 0) message("Node %d is not present after repartition", i);
     }
   }
 
-  /* Distribute the celllist partition and apply. */
-  if ((res = MPI_Bcast(celllist, s->nr_cells, MPI_INT, 0, MPI_COMM_WORLD)) !=
-      MPI_SUCCESS)
-    mpi_error(res, "Failed to bcast the cell list");
+  /* If partition failed continue with the current one, but make this clear. */
+  if (failed) {
+    if (nodeID == 0)
+      message(
+          "WARNING: repartition has failed, continuing with the current"
+          " partition, load balance will not be optimal");
+    for (int k = 0; k < nr_cells; k++)
+      repartition->celllist[k] = cells[k].nodeID;
+  }
 
   /* And apply to our cells */
-  split_metis(s, nr_nodes, celllist);
+  split_metis(s, nr_nodes, repartition->celllist);
 
   /* Clean up. */
   free(inds);
-  if (bothweights) free(weights_v);
-  free(weights_e);
-  free(celllist);
+  if (vweights) free(weights_v);
+  if (eweights) free(weights_e);
 }
-#endif
 
 /**
- * @brief Repartition the cells amongst the nodes using vertex weights
+ * @brief Repartition the cells amongst the nodes using weights based on
+ *        the memory use of particles in the cells.
  *
- * @param s The space containing the local cells.
- * @param nodeID our MPI node id.
- * @param nr_nodes number of MPI nodes.
+ * @param repartition the partition struct of the local engine.
+ * @param nodeID our nodeID.
+ * @param nr_nodes the number of nodes.
+ * @param s the space of cells holding our local particles.
  */
-#if defined(WITH_MPI) && defined(HAVE_METIS)
-static void repart_vertex_metis(struct space *s, int nodeID, int nr_nodes) {
+static void repart_memory_metis(struct repartition *repartition, int nodeID,
+                                int nr_nodes, struct space *s) {
 
-  /* Use particle counts as vertex weights. */
-  /* Space for particles per cell counts, which will be used as weights. */
+  /* Space for counts of particle memory use per cell. */
   double *weights = NULL;
   if ((weights = (double *)malloc(sizeof(double) * s->nr_cells)) == NULL)
-    error("Failed to allocate weights buffer.");
+    error("Failed to allocate cell weights buffer.");
+  bzero(weights, sizeof(double) * s->nr_cells);
 
-  /* Check each particle and accumulate the counts per cell. */
-  accumulate_counts(s, weights);
+  /* Check each particle and accumulate the sizes per cell. */
+  accumulate_sizes(s, weights);
 
   /* Get all the counts from all the nodes. */
-  int res;
-  if ((res = MPI_Allreduce(MPI_IN_PLACE, weights, s->nr_cells, MPI_DOUBLE,
-                           MPI_SUM, MPI_COMM_WORLD)) != MPI_SUCCESS)
-    mpi_error(res, "Failed to allreduce particle cell weights.");
+  if (MPI_Allreduce(MPI_IN_PLACE, weights, s->nr_cells, MPI_DOUBLE, MPI_SUM,
+                    MPI_COMM_WORLD) != MPI_SUCCESS)
+    error("Failed to allreduce particle cell weights.");
 
-  /* Main node does the partition calculation. */
-  int *celllist = (int *)malloc(sizeof(int) * s->nr_cells);
-  if (celllist == NULL) error("Failed to allocate celllist");
+    /* Allocate cell list for the partition. If not already done. */
+#ifdef HAVE_PARMETIS
+  int refine = 1;
+#endif
+  if (repartition->ncelllist != s->nr_cells) {
+#ifdef HAVE_PARMETIS
+    refine = 0;
+#endif
+    free(repartition->celllist);
+    repartition->ncelllist = 0;
+    if ((repartition->celllist = (int *)malloc(sizeof(int) * s->nr_cells)) ==
+        NULL)
+      error("Failed to allocate celllist");
+    repartition->ncelllist = s->nr_cells;
+  }
+
+  /* We need to rescale the sum of the weights so that the sum is
+   * less than IDX_MAX, that is the range of idx_t. */
+  double sum = 0.0;
+  for (int k = 0; k < s->nr_cells; k++) sum += weights[k];
+  if (sum > (double)IDX_MAX) {
+    double scale = (double)(IDX_MAX - 1000) / sum;
+    for (int k = 0; k < s->nr_cells; k++) weights[k] *= scale;
+  }
 
-  if (nodeID == 0) pick_metis(s, nr_nodes, weights, NULL, celllist);
+    /* And repartition. */
+#ifdef HAVE_PARMETIS
+  if (repartition->usemetis) {
+    pick_metis(nodeID, s, nr_nodes, weights, NULL, repartition->celllist);
+  } else {
+    pick_parmetis(nodeID, s, nr_nodes, weights, NULL, refine,
+                  repartition->adaptive, repartition->itr,
+                  repartition->celllist);
+  }
+#else
+  pick_metis(nodeID, s, nr_nodes, weights, NULL, repartition->celllist);
+#endif
 
-  /* Distribute the celllist partition and apply. */
-  if ((res = MPI_Bcast(celllist, s->nr_cells, MPI_INT, 0, MPI_COMM_WORLD)) !=
-      MPI_SUCCESS)
-    mpi_error(res, "Failed to bcast the cell list");
+  /* Check that all cells have good values. All nodes have the same copy, so
+   * just check on one. */
+  if (nodeID == 0) {
+    for (int k = 0; k < s->nr_cells; k++)
+      if (repartition->celllist[k] < 0 || repartition->celllist[k] >= nr_nodes)
+        error("Got bad nodeID %d for cell %i.", repartition->celllist[k], k);
+  }
 
-  /* And apply to our cells */
-  split_metis(s, nr_nodes, celllist);
+  /* Check that the partition is complete and all nodes have some cells. */
+  int present[nr_nodes];
+  int failed = 0;
+  for (int i = 0; i < nr_nodes; i++) present[i] = 0;
+  for (int i = 0; i < s->nr_cells; i++) present[repartition->celllist[i]]++;
+  for (int i = 0; i < nr_nodes; i++) {
+    if (!present[i]) {
+      failed = 1;
+      if (nodeID == 0) message("Node %d is not present after repartition", i);
+    }
+  }
+
+  /* If partition failed continue with the current one, but make this clear. */
+  if (failed) {
+    if (nodeID == 0)
+      message(
+          "WARNING: repartition has failed, continuing with the current"
+          " partition, load balance will not be optimal");
+    for (int k = 0; k < s->nr_cells; k++)
+      repartition->celllist[k] = s->cells_top[k].nodeID;
+  }
 
-  free(weights);
-  free(celllist);
+  /* And apply to our cells */
+  split_metis(s, nr_nodes, repartition->celllist);
 }
-#endif
+#endif /* WITH_MPI && (HAVE_METIS || HAVE_PARMETIS) */
 
 /**
  * @brief Repartition the space using the given repartition type.
@@ -866,28 +1667,24 @@ void partition_repartition(struct repartition *reparttype, int nodeID,
                            int nr_nodes, struct space *s, struct task *tasks,
                            int nr_tasks) {
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
-
-  if (reparttype->type == REPART_METIS_VERTEX_COSTS_EDGE_COSTS) {
-    repart_edge_metis(0, 1, 0, nodeID, nr_nodes, s, tasks, nr_tasks);
-
-  } else if (reparttype->type == REPART_METIS_EDGE_COSTS) {
-    repart_edge_metis(0, 0, 0, nodeID, nr_nodes, s, tasks, nr_tasks);
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
 
-  } else if (reparttype->type == REPART_METIS_VERTEX_COUNTS_EDGE_COSTS) {
-    repart_edge_metis(1, 1, 0, nodeID, nr_nodes, s, tasks, nr_tasks);
+  ticks tic = getticks();
 
-  } else if (reparttype->type == REPART_METIS_VERTEX_COSTS_EDGE_TIMEBINS) {
-    repart_edge_metis(0, 1, 1, nodeID, nr_nodes, s, tasks, nr_tasks);
+  if (reparttype->type == REPART_METIS_VERTEX_EDGE_COSTS) {
+    repart_edge_metis(1, 1, 0, reparttype, nodeID, nr_nodes, s, tasks,
+                      nr_tasks);
 
-  } else if (reparttype->type == REPART_METIS_VERTEX_COUNTS_EDGE_TIMEBINS) {
-    repart_edge_metis(1, 1, 1, nodeID, nr_nodes, s, tasks, nr_tasks);
+  } else if (reparttype->type == REPART_METIS_EDGE_COSTS) {
+    repart_edge_metis(0, 1, 0, reparttype, nodeID, nr_nodes, s, tasks,
+                      nr_tasks);
 
-  } else if (reparttype->type == REPART_METIS_EDGE_TIMEBINS) {
-    repart_edge_metis(0, 0, 1, nodeID, nr_nodes, s, tasks, nr_tasks);
+  } else if (reparttype->type == REPART_METIS_VERTEX_COSTS_TIMEBINS) {
+    repart_edge_metis(1, 1, 1, reparttype, nodeID, nr_nodes, s, tasks,
+                      nr_tasks);
 
   } else if (reparttype->type == REPART_METIS_VERTEX_COUNTS) {
-    repart_vertex_metis(s, nodeID, nr_nodes);
+    repart_memory_metis(reparttype, nodeID, nr_nodes, s);
 
   } else if (reparttype->type == REPART_NONE) {
     /* Doing nothing. */
@@ -895,8 +1692,12 @@ void partition_repartition(struct repartition *reparttype, int nodeID,
   } else {
     error("Impossible repartition type");
   }
+
+  if (s->e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 #else
-  error("SWIFT was not compiled with METIS support.");
+  error("SWIFT was not compiled with METIS or ParMETIS support.");
 #endif
 }
 
@@ -918,6 +1719,7 @@ void partition_repartition(struct repartition *reparttype, int nodeID,
  */
 void partition_initial_partition(struct partition *initial_partition,
                                  int nodeID, int nr_nodes, struct space *s) {
+  ticks tic = getticks();
 
   /* Geometric grid partitioning. */
   if (initial_partition->type == INITPART_GRID) {
@@ -953,21 +1755,21 @@ void partition_initial_partition(struct partition *initial_partition,
 
   } else if (initial_partition->type == INITPART_METIS_WEIGHT ||
              initial_partition->type == INITPART_METIS_NOWEIGHT) {
-#if defined(WITH_MPI) && defined(HAVE_METIS)
-    /* Simple k-way partition selected by METIS using cell particle counts as
-     * weights or not. Should be best when starting with a inhomogeneous dist.
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+    /* Simple k-way partition selected by METIS using cell particle
+     * counts as weights or not. Should be best when starting with an
+     * inhomogeneous dist.
      */
 
-    /* Space for particles per cell counts, which will be used as weights or
-     * not. */
+    /* Space for particles sizes per cell, which will be used as weights. */
     double *weights = NULL;
     if (initial_partition->type == INITPART_METIS_WEIGHT) {
       if ((weights = (double *)malloc(sizeof(double) * s->nr_cells)) == NULL)
         error("Failed to allocate weights buffer.");
       bzero(weights, sizeof(double) * s->nr_cells);
 
-      /* Check each particle and accumilate the counts per cell. */
-      accumulate_counts(s, weights);
+      /* Check each particle and accumulate the sizes per cell. */
+      accumulate_sizes(s, weights);
 
       /* Get all the counts from all the nodes. */
       if (MPI_Allreduce(MPI_IN_PLACE, weights, s->nr_cells, MPI_DOUBLE, MPI_SUM,
@@ -975,14 +1777,19 @@ void partition_initial_partition(struct partition *initial_partition,
         error("Failed to allreduce particle cell weights.");
     }
 
-    /* Main node does the partition calculation. */
-    int *celllist = (int *)malloc(sizeof(int) * s->nr_cells);
-    if (celllist == NULL) error("Failed to allocate celllist");
-    if (nodeID == 0) pick_metis(s, nr_nodes, weights, NULL, celllist);
-
-    /* Distribute the celllist partition and apply. */
-    int res = MPI_Bcast(celllist, s->nr_cells, MPI_INT, 0, MPI_COMM_WORLD);
-    if (res != MPI_SUCCESS) mpi_error(res, "Failed to bcast the cell list");
+    /* Do the calculation. */
+    int *celllist = NULL;
+    if ((celllist = (int *)malloc(sizeof(int) * s->nr_cells)) == NULL)
+      error("Failed to allocate celllist");
+#ifdef HAVE_PARMETIS
+    if (initial_partition->usemetis) {
+      pick_metis(nodeID, s, nr_nodes, weights, NULL, celllist);
+    } else {
+      pick_parmetis(nodeID, s, nr_nodes, weights, NULL, 0, 0, 0.0f, celllist);
+    }
+#else
+    pick_metis(nodeID, s, nr_nodes, weights, NULL, celllist);
+#endif
 
     /* And apply to our cells */
     split_metis(s, nr_nodes, celllist);
@@ -999,7 +1806,7 @@ void partition_initial_partition(struct partition *initial_partition,
     if (weights != NULL) free(weights);
     free(celllist);
 #else
-    error("SWIFT was not compiled with METIS support");
+    error("SWIFT was not compiled with METIS or ParMETIS support");
 #endif
 
   } else if (initial_partition->type == INITPART_VECTORIZE) {
@@ -1007,8 +1814,9 @@ void partition_initial_partition(struct partition *initial_partition,
 #if defined(WITH_MPI)
     /* Vectorised selection, guaranteed to work for samples less than the
      * number of cells, but not very clumpy in the selection of regions. */
-    int *samplecells = (int *)malloc(sizeof(int) * nr_nodes * 3);
-    if (samplecells == NULL) error("Failed to allocate samplecells");
+    int *samplecells = NULL;
+    if ((samplecells = (int *)malloc(sizeof(int) * nr_nodes * 3)) == NULL)
+      error("Failed to allocate samplecells");
 
     if (nodeID == 0) {
       pick_vector(s, nr_nodes, samplecells);
@@ -1026,11 +1834,15 @@ void partition_initial_partition(struct partition *initial_partition,
     error("SWIFT was not compiled with MPI support");
 #endif
   }
+
+  if (s->e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 /**
  * @brief Initialises the partition and re-partition scheme from the parameter
- *        file
+ *        file.
  *
  * @param partition The #partition scheme to initialise.
  * @param repartition The #repartition scheme to initialise.
@@ -1044,11 +1856,11 @@ void partition_init(struct partition *partition,
 #ifdef WITH_MPI
 
 /* Defaults make use of METIS if available */
-#ifdef HAVE_METIS
-  const char *default_repart = "costs/costs";
-  const char *default_part = "simple_metis";
+#if defined(HAVE_METIS) || defined(HAVE_PARMETIS)
+  const char *default_repart = "fullcosts";
+  const char *default_part = "memory";
 #else
-  const char *default_repart = "none/none";
+  const char *default_repart = "none";
   const char *default_part = "grid";
 #endif
 
@@ -1070,24 +1882,24 @@ void partition_init(struct partition *partition,
     case 'v':
       partition->type = INITPART_VECTORIZE;
       break;
-#ifdef HAVE_METIS
-    case 's':
+#if defined(HAVE_METIS) || defined(HAVE_PARMETIS)
+    case 'r':
       partition->type = INITPART_METIS_NOWEIGHT;
       break;
-    case 'w':
+    case 'm':
       partition->type = INITPART_METIS_WEIGHT;
       break;
     default:
       message("Invalid choice of initial partition type '%s'.", part_type);
       error(
-          "Permitted values are: 'grid', 'simple_metis', 'weighted_metis'"
-          " or 'vectorized'");
+          "Permitted values are: 'grid', 'region', 'memory' or "
+          "'vectorized'");
 #else
     default:
       message("Invalid choice of initial partition type '%s'.", part_type);
       error(
           "Permitted values are: 'grid' or 'vectorized' when compiled "
-          "without METIS.");
+          "without METIS or ParMETIS.");
 #endif
   }
 
@@ -1101,40 +1913,33 @@ void partition_init(struct partition *partition,
   parser_get_opt_param_string(params, "DomainDecomposition:repartition_type",
                               part_type, default_repart);
 
-  if (strcmp("none/none", part_type) == 0) {
+  if (strcmp("none", part_type) == 0) {
     repartition->type = REPART_NONE;
 
-#ifdef HAVE_METIS
-  } else if (strcmp("costs/costs", part_type) == 0) {
-    repartition->type = REPART_METIS_VERTEX_COSTS_EDGE_COSTS;
-
-  } else if (strcmp("counts/none", part_type) == 0) {
-    repartition->type = REPART_METIS_VERTEX_COUNTS;
+#if defined(HAVE_METIS) || defined(HAVE_PARMETIS)
+  } else if (strcmp("fullcosts", part_type) == 0) {
+    repartition->type = REPART_METIS_VERTEX_EDGE_COSTS;
 
-  } else if (strcmp("none/costs", part_type) == 0) {
+  } else if (strcmp("edgecosts", part_type) == 0) {
     repartition->type = REPART_METIS_EDGE_COSTS;
 
-  } else if (strcmp("counts/costs", part_type) == 0) {
-    repartition->type = REPART_METIS_VERTEX_COUNTS_EDGE_COSTS;
-
-  } else if (strcmp("costs/time", part_type) == 0) {
-    repartition->type = REPART_METIS_VERTEX_COSTS_EDGE_TIMEBINS;
+  } else if (strcmp("memory", part_type) == 0) {
+    repartition->type = REPART_METIS_VERTEX_COUNTS;
 
-  } else if (strcmp("counts/time", part_type) == 0) {
-    repartition->type = REPART_METIS_VERTEX_COUNTS_EDGE_TIMEBINS;
+  } else if (strcmp("timecosts", part_type) == 0) {
+    repartition->type = REPART_METIS_VERTEX_COSTS_TIMEBINS;
 
-  } else if (strcmp("none/time", part_type) == 0) {
-    repartition->type = REPART_METIS_EDGE_TIMEBINS;
   } else {
     message("Invalid choice of re-partition type '%s'.", part_type);
     error(
-        "Permitted values are: 'none/none', 'costs/costs',"
-        "'counts/none', 'none/costs', 'counts/costs', "
-        "'costs/time', 'counts/time' or 'none/time'");
+        "Permitted values are: 'none', 'fullcosts', 'edgecosts' "
+        "'memory' or 'timecosts'");
 #else
   } else {
     message("Invalid choice of re-partition type '%s'.", part_type);
-    error("Permitted values are: 'none/none' when compiled without METIS.");
+    error(
+        "Permitted values are: 'none' when compiled without "
+        "METIS or ParMETIS.");
 #endif
   }
 
@@ -1150,23 +1955,87 @@ void partition_init(struct partition *partition,
         " than 1");
 
   /* Fraction of particles that should be updated before a repartition
-   * based on CPU time is considered. */
+   * based on CPU time is considered, needs to be high. */
   repartition->minfrac =
-      parser_get_opt_param_float(params, "DomainDecomposition:minfrac", 0.9f);
-  if (repartition->minfrac <= 0 || repartition->minfrac > 1)
+      parser_get_opt_param_float(params, "DomainDecomposition:minfrac", 0.95f);
+  if (repartition->minfrac <= 0.5 || repartition->minfrac > 1)
     error(
-        "Invalid DomainDecomposition:minfrac, must be greater than 0 and less "
-        "than equal to 1");
+        "Invalid DomainDecomposition:minfrac, must be greater than 0.5 "
+        "and less than equal to 1");
+
+  /* Use METIS or ParMETIS when ParMETIS is also available. */
+  repartition->usemetis =
+      parser_get_opt_param_int(params, "DomainDecomposition:usemetis", 0);
+  partition->usemetis = repartition->usemetis;
+
+  /* Use adaptive or simple refinement when repartitioning. */
+  repartition->adaptive =
+      parser_get_opt_param_int(params, "DomainDecomposition:adaptive", 1);
+
+  /* Ratio of interprocess communication time to data redistribution time. */
+  repartition->itr =
+      parser_get_opt_param_float(params, "DomainDecomposition:itr", 100.0f);
 
   /* Clear the celllist for use. */
   repartition->ncelllist = 0;
   repartition->celllist = NULL;
 
+  /* Do we have fixed costs available? These can be used to force
+   * repartitioning at any time. Not required if not repartitioning. */
+  repartition->use_fixed_costs = parser_get_opt_param_int(
+      params, "DomainDecomposition:use_fixed_costs", 0);
+  if (repartition->type == REPART_NONE) repartition->use_fixed_costs = 0;
+
+  /* Check if this is true or required and initialise them. */
+  if (repartition->use_fixed_costs || repartition->trigger > 1) {
+    if (!repart_init_fixed_costs()) {
+      if (repartition->trigger <= 1) {
+        if (engine_rank == 0)
+          message(
+              "WARNING: fixed cost repartitioning was requested but is"
+              " not available.");
+        repartition->use_fixed_costs = 0;
+      } else {
+        error(
+            "Forced fixed cost repartitioning was requested but is"
+            " not available.");
+      }
+    }
+  }
+
 #else
   error("SWIFT was not compiled with MPI support");
 #endif
 }
 
+#ifdef WITH_MPI
+/**
+ * @brief Set the fixed costs for repartition using METIS.
+ *
+ *  These are determined using a run with the -y flag, which produces
+ *  a statistical analysis that is condensed into a .h file for inclusion.
+ *
+ *  If the default include file is used then no fixed costs are set and this
+ *  function will return 0.
+ */
+static int repart_init_fixed_costs(void) {
+
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+  /* Set the default fixed cost. */
+  for (int j = 0; j < task_type_count; j++) {
+    for (int k = 0; k < task_subtype_count; k++) {
+      repartition_costs[j][k] = 1.0;
+    }
+  }
+
+#include <partition_fixed_costs.h>
+  return HAVE_FIXED_COSTS;
+#endif
+
+  return 0;
+}
+#endif /* WITH_MPI */
+
 /*  General support */
 /*  =============== */
 
@@ -1181,8 +2050,9 @@ void partition_init(struct partition *partition,
  */
 static int check_complete(struct space *s, int verbose, int nregions) {
 
-  int *present = (int *)malloc(sizeof(int) * nregions);
-  if (present == NULL) error("Failed to allocate present array");
+  int *present = NULL;
+  if ((present = (int *)malloc(sizeof(int) * nregions)) == NULL)
+    error("Failed to allocate present array");
 
   int failed = 0;
   for (int i = 0; i < nregions; i++) present[i] = 0;
@@ -1203,6 +2073,195 @@ static int check_complete(struct space *s, int verbose, int nregions) {
   return (!failed);
 }
 
+#if defined(WITH_MPI) && (defined(HAVE_METIS) || defined(HAVE_PARMETIS))
+#ifdef SWIFT_DEBUG_CHECKS
+/**
+ * @brief Check that the threadpool version of the weights construction is
+ *        correct by comparing to the old serial code.
+ *
+ * @param tasks the list of tasks
+ * @param nr_tasks number of tasks
+ * @param mydata additional values as passed to threadpool
+ * @param ref_weights_v vertex weights to check
+ * @param ref_weights_e edge weights to check
+ */
+static void check_weights(struct task *tasks, int nr_tasks,
+                          struct weights_mapper_data *mydata,
+                          double *ref_weights_v, double *ref_weights_e) {
+
+  idx_t *inds = mydata->inds;
+  int eweights = mydata->eweights;
+  int nodeID = mydata->nodeID;
+  int nr_cells = mydata->nr_cells;
+  int timebins = mydata->timebins;
+  int vweights = mydata->vweights;
+  int use_ticks = mydata->use_ticks;
+
+  struct cell *cells = mydata->cells;
+
+  /* Allocate and init weights. */
+  double *weights_v = NULL;
+  double *weights_e = NULL;
+  if (vweights) {
+    if ((weights_v = (double *)malloc(sizeof(double) * nr_cells)) == NULL)
+      error("Failed to allocate vertex weights arrays.");
+    bzero(weights_v, sizeof(double) * nr_cells);
+  }
+  if (eweights) {
+    if ((weights_e = (double *)malloc(sizeof(double) * 26 * nr_cells)) == NULL)
+      error("Failed to allocate edge weights arrays.");
+    bzero(weights_e, sizeof(double) * 26 * nr_cells);
+  }
+
+  /* Loop over the tasks... */
+  for (int j = 0; j < nr_tasks; j++) {
+
+    /* Get a pointer to the kth task. */
+    struct task *t = &tasks[j];
+
+    /* Skip un-interesting tasks. */
+    if (t->type == task_type_send || t->type == task_type_recv ||
+        t->type == task_type_logger || t->implicit || t->ci == NULL)
+      continue;
+
+    /* Get weight for this task. Either based on fixed costs or task timings. */
+    double w = 0.0;
+    if (use_ticks) {
+      w = (double)t->toc - (double)t->tic;
+    } else {
+      w = repartition_costs[t->type][t->subtype];
+    }
+    if (w <= 0.0) continue;
+
+    /* Get the top-level cells involved. */
+    struct cell *ci, *cj;
+    for (ci = t->ci; ci->parent != NULL; ci = ci->parent)
+      ;
+    if (t->cj != NULL)
+      for (cj = t->cj; cj->parent != NULL; cj = cj->parent)
+        ;
+    else
+      cj = NULL;
+
+    /* Get the cell IDs. */
+    int cid = ci - cells;
+
+    /* Different weights for different tasks. */
+    if (t->type == task_type_drift_part || t->type == task_type_drift_gpart ||
+        t->type == task_type_ghost || t->type == task_type_extra_ghost ||
+        t->type == task_type_kick1 || t->type == task_type_kick2 ||
+        t->type == task_type_end_hydro_force ||
+        t->type == task_type_end_grav_force || t->type == task_type_cooling ||
+        t->type == task_type_star_formation || t->type == task_type_timestep ||
+        t->type == task_type_init_grav || t->type == task_type_grav_down ||
+        t->type == task_type_grav_long_range) {
+
+      /* Particle updates add only to vertex weight. */
+      if (vweights) weights_v[cid] += w;
+    }
+
+    /* Self interaction? */
+    else if ((t->type == task_type_self && ci->nodeID == nodeID) ||
+             (t->type == task_type_sub_self && cj == NULL &&
+              ci->nodeID == nodeID)) {
+      /* Self interactions add only to vertex weight. */
+      if (vweights) weights_v[cid] += w;
+
+    }
+
+    /* Pair? */
+    else if (t->type == task_type_pair || (t->type == task_type_sub_pair)) {
+      /* In-cell pair? */
+      if (ci == cj) {
+        /* Add weight to vertex for ci. */
+        if (vweights) weights_v[cid] += w;
+
+      }
+
+      /* Distinct cells. */
+      else {
+        /* Index of the jth cell. */
+        int cjd = cj - cells;
+
+        /* Local cells add weight to vertices. */
+        if (vweights && ci->nodeID == nodeID) {
+          weights_v[cid] += 0.5 * w;
+          if (cj->nodeID == nodeID) weights_v[cjd] += 0.5 * w;
+        }
+
+        if (eweights) {
+
+          /* Find indices of ci/cj neighbours. Note with gravity these cells may
+           * not be neighbours, in that case we ignore any edge weight for that
+           * pair. */
+          int ik = -1;
+          for (int k = 26 * cid; k < 26 * nr_cells; k++) {
+            if (inds[k] == cjd) {
+              ik = k;
+              break;
+            }
+          }
+
+          /* cj */
+          int jk = -1;
+          for (int k = 26 * cjd; k < 26 * nr_cells; k++) {
+            if (inds[k] == cid) {
+              jk = k;
+              break;
+            }
+          }
+          if (ik != -1 && jk != -1) {
+
+            if (timebins) {
+              /* Add weights to edge for all cells based on the expected
+               * interaction time (calculated as the time to the last expected
+               * time) as we want to avoid having active cells on the edges, so
+               * we cut for that. Note that weight is added to the local and
+               * remote cells, as we want to keep both away from any cuts, this
+               * can overflow int, so take care. */
+              int dti = num_time_bins - get_time_bin(ci->hydro.ti_end_min);
+              int dtj = num_time_bins - get_time_bin(cj->hydro.ti_end_min);
+              double dt = (double)(1 << dti) + (double)(1 << dtj);
+              weights_e[ik] += dt;
+              weights_e[jk] += dt;
+
+            } else {
+
+              /* Add weights from task costs to the edge. */
+              weights_e[ik] += w;
+              weights_e[jk] += w;
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /* Now do the comparisons. */
+  double refsum = 0.0;
+  double sum = 0.0;
+  for (int k = 0; k < nr_cells; k++) {
+    refsum += ref_weights_v[k];
+    sum += weights_v[k];
+  }
+  if (fabs(sum - refsum) > 1.0) {
+    error("vertex partition weights are not consistent (%f!=%f)", sum, refsum);
+  } else {
+    refsum = 0.0;
+    sum = 0.0;
+    for (int k = 0; k < 26 * nr_cells; k++) {
+      refsum += ref_weights_e[k];
+      sum += weights_e[k];
+    }
+    if (fabs(sum - refsum) > 1.0) {
+      error("edge partition weights are not consistent (%f!=%f)", sum, refsum);
+    }
+  }
+  message("partition weights checked successfully");
+}
+#endif
+#endif
+
 /**
  * @brief Partition a space of cells based on another space of cells.
  *
@@ -1262,10 +2321,13 @@ int partition_space_to_space(double *oldh, double *oldcdim, int *oldnodeIDs,
  *
  */
 void partition_store_celllist(struct space *s, struct repartition *reparttype) {
-  if (reparttype->celllist != NULL) free(reparttype->celllist);
-  reparttype->celllist = (int *)malloc(sizeof(int) * s->nr_cells);
-  reparttype->ncelllist = s->nr_cells;
-  if (reparttype->celllist == NULL) error("Failed to allocate celllist");
+  if (reparttype->ncelllist != s->nr_cells) {
+    free(reparttype->celllist);
+    if ((reparttype->celllist = (int *)malloc(sizeof(int) * s->nr_cells)) ==
+        NULL)
+      error("Failed to allocate celllist");
+    reparttype->ncelllist = s->nr_cells;
+  }
 
   for (int i = 0; i < s->nr_cells; i++) {
     reparttype->celllist[i] = s->cells_top[i].nodeID;
@@ -1292,8 +2354,9 @@ void partition_restore_celllist(struct space *s,
       }
     } else {
       error(
-          "Cannot apply the saved partition celllist as the number of"
-          "top-level cells (%d) is different to the saved number (%d)",
+          "Cannot apply the saved partition celllist as the "
+          "number of top-level cells (%d) is different to the "
+          "saved number (%d)",
           s->nr_cells, reparttype->ncelllist);
     }
   }
@@ -1329,8 +2392,9 @@ void partition_struct_restore(struct repartition *reparttype, FILE *stream) {
 
   /* Also restore the celllist, if we have one. */
   if (reparttype->ncelllist > 0) {
-    reparttype->celllist = (int *)malloc(sizeof(int) * reparttype->ncelllist);
-    if (reparttype->celllist == NULL) error("Failed to allocate celllist");
+    if ((reparttype->celllist =
+             (int *)malloc(sizeof(int) * reparttype->ncelllist)) == NULL)
+      error("Failed to allocate celllist");
     restart_read_blocks(reparttype->celllist,
                         sizeof(int) * reparttype->ncelllist, 1, stream, NULL,
                         "repartition celllist");
diff --git a/src/partition.h b/src/partition.h
index ec7d670a43537c4717090b857b6e6ba9186b8f1c..de0d95a5e343f1aa85a03c2cda49019f2fd08037 100644
--- a/src/partition.h
+++ b/src/partition.h
@@ -38,18 +38,16 @@ extern const char *initial_partition_name[];
 struct partition {
   enum partition_type type;
   int grid[3];
+  int usemetis;
 };
 
 /* Repartition type to use. */
 enum repartition_type {
   REPART_NONE = 0,
-  REPART_METIS_VERTEX_COSTS_EDGE_COSTS,
-  REPART_METIS_VERTEX_COUNTS,
+  REPART_METIS_VERTEX_EDGE_COSTS,
   REPART_METIS_EDGE_COSTS,
-  REPART_METIS_VERTEX_COUNTS_EDGE_COSTS,
-  REPART_METIS_VERTEX_COSTS_EDGE_TIMEBINS,
-  REPART_METIS_VERTEX_COUNTS_EDGE_TIMEBINS,
-  REPART_METIS_EDGE_TIMEBINS
+  REPART_METIS_VERTEX_COUNTS,
+  REPART_METIS_VERTEX_COSTS_TIMEBINS
 };
 
 /* Repartition preferences. */
@@ -57,8 +55,14 @@ struct repartition {
   enum repartition_type type;
   float trigger;
   float minfrac;
+  float itr;
+  int usemetis;
+  int adaptive;
+
+  int use_fixed_costs;
+  int use_ticks;
 
-  /* The partition as a cell list, if used. */
+  /* The partition as a cell-list. */
   int ncelllist;
   int *celllist;
 };
diff --git a/src/partition_fixed_costs.h b/src/partition_fixed_costs.h
new file mode 100644
index 0000000000000000000000000000000000000000..e713684b28ce81e60b9fa98a6078d1c8c370f935
--- /dev/null
+++ b/src/partition_fixed_costs.h
@@ -0,0 +1,25 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Peter W. Draper (p.w.draper@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_PARTITION_FIXED_COSTS_H
+#define SWIFT_PARTITION_FIXED_COSTS_H
+
+/* Default is no fixed costs. */
+#define HAVE_FIXED_COSTS 0
+
+#endif /* SWIFT_PARTITION_FIXED_COSTS_H */
diff --git a/src/physical_constants.c b/src/physical_constants.c
index 3936d07f4207263a4c391715ab0a8dd9ded6fa6d..3e3c72812c552aba1204086353dc7d239a5c36f9 100644
--- a/src/physical_constants.c
+++ b/src/physical_constants.c
@@ -32,7 +32,8 @@
 /**
  * @brief Converts physical constants to the internal unit system
  *
- * Some constants can be overwritten by the YAML file values.
+ * Some constants can be overwritten by the YAML file values. If the
+ * param argument is NULL, no overwriting is done.
  *
  * @param us The current internal system of units.
  * @param params The parsed parameter file.
@@ -48,8 +49,10 @@ void phys_const_init(const struct unit_system *us, struct swift_params *params,
       const_newton_G_cgs / units_general_cgs_conversion_factor(us, dimension_G);
 
   /* Overwrite G if present in the file */
-  internal_const->const_newton_G = parser_get_opt_param_double(
-      params, "PhysicalConstants:G", internal_const->const_newton_G);
+  if (params != NULL) {
+    internal_const->const_newton_G = parser_get_opt_param_double(
+        params, "PhysicalConstants:G", internal_const->const_newton_G);
+  }
 
   const float dimension_c[5] = {0, 1, -1, 0, 0}; /* [cm s^-1] */
   internal_const->const_speed_light_c =
@@ -126,6 +129,16 @@ void phys_const_init(const struct unit_system *us, struct swift_params *params,
   internal_const->const_T_CMB_0 =
       const_T_CMB_0_cgs /
       units_general_cgs_conversion_factor(us, dimension_temperature);
+
+  const float dimension_Yp[5] = {0, 0, 0, 0, 0}; /* [ - ] */
+  internal_const->const_primordial_He_fraction =
+      const_primordial_He_fraction_cgs /
+      units_general_cgs_conversion_factor(us, dimension_Yp);
+
+  const float dimension_reduced_hubble[5] = {0, 0, -1, 0, 0}; /* [s^-1] */
+  internal_const->const_reduced_hubble =
+      const_reduced_hubble_cgs /
+      units_general_cgs_conversion_factor(us, dimension_reduced_hubble);
 }
 
 /**
@@ -148,6 +161,7 @@ void phys_const_print(const struct phys_const *internal_const) {
           internal_const->const_astronomical_unit);
   message("%25s = %e", "Parsec", internal_const->const_parsec);
   message("%25s = %e", "Solar mass", internal_const->const_solar_mass);
+  message("%25s = %e", "km/s/Mpc", internal_const->const_reduced_hubble);
 }
 
 /**
diff --git a/src/physical_constants.h b/src/physical_constants.h
index 16628bfd6894699608e167d4b309fa5636209219..97da4b322a8bca1f978b43a4cabda2ff1cc1e517 100644
--- a/src/physical_constants.h
+++ b/src/physical_constants.h
@@ -93,6 +93,12 @@ struct phys_const {
 
   /*! Temperature of the CMB at present day */
   double const_T_CMB_0;
+
+  /*! Primordial Helium fraction */
+  double const_primordial_He_fraction;
+
+  /*! Reduced hubble constant units (i.e. H_0 / h) */
+  double const_reduced_hubble;
 };
 
 void phys_const_init(const struct unit_system* us, struct swift_params* params,
diff --git a/src/physical_constants_cgs.h b/src/physical_constants_cgs.h
index 40eef2c992e819e01980cbcbd7ea7f05721e93cf..4d1a54f68ba557c74fb489a9343eaf3846c481f4 100644
--- a/src/physical_constants_cgs.h
+++ b/src/physical_constants_cgs.h
@@ -95,4 +95,11 @@ const double const_earth_mass_cgs = 5.9724e27;
 /*! Temperature of the CMB at present day [K] */
 const double const_T_CMB_0_cgs = 2.7255;
 
+/*! Primordial Helium fraction [-] */
+const double const_primordial_He_fraction_cgs = 0.245;
+
+/*! Reduced Hubble constant units (i.e. H_0 / h == 100 km / s / Mpc in CGS)
+ * [s^-1] */
+const double const_reduced_hubble_cgs = 3.2407792894458e-18;
+
 #endif /* SWIFT_PHYSICAL_CONSTANTS_CGS_H */
diff --git a/src/potential.h b/src/potential.h
index 814b83c69180631db21e392704c0279808a6f03e..59567fe92296068f838c39a3eb5ff55c14005d48 100644
--- a/src/potential.h
+++ b/src/potential.h
@@ -34,6 +34,10 @@
 #include "./potential/point_mass/potential.h"
 #elif defined(EXTERNAL_POTENTIAL_ISOTHERMAL)
 #include "./potential/isothermal/potential.h"
+#elif defined(EXTERNAL_POTENTIAL_HERNQUIST)
+#include "./potential/hernquist/potential.h"
+#elif defined(EXTERNAL_POTENTIAL_NFW)
+#include "./potential/nfw/potential.h"
 #elif defined(EXTERNAL_POTENTIAL_DISC_PATCH)
 #include "./potential/disc_patch/potential.h"
 #elif defined(EXTERNAL_POTENTIAL_SINE_WAVE)
diff --git a/src/potential/hernquist/potential.h b/src/potential/hernquist/potential.h
new file mode 100644
index 0000000000000000000000000000000000000000..b98f45ff7ab4aeffd94f47f4931d3dd6c80d5642
--- /dev/null
+++ b/src/potential/hernquist/potential.h
@@ -0,0 +1,309 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_POTENTIAL_HERNQUIST_H
+#define SWIFT_POTENTIAL_HERNQUIST_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <math.h>
+
+/* Local includes. */
+#include "error.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "space.h"
+#include "units.h"
+
+/**
+ * @brief External Potential Properties - Hernquist potential
+ */
+struct external_potential {
+
+  /*! Position of the centre of potential */
+  double x[3];
+
+  /*! Mass of the halo */
+  double mass;
+
+  /*! Scale length (often denoted a; to prevent confusion with the
+   * cosmological scale-factor we use al) */
+  double al;
+
+  /*! Square of the softening length. Acceleration tends to zero within this
+   * distance from the origin */
+  double epsilon2;
+
+  /*! Minimum timestep of the potential given by the timestep multiple
+   * times the orbital time at the softening length */
+  double mintime;
+
+  /*! Time-step condition pre-factor, is multiplied times the circular orbital
+   * time to get the time steps */
+  double timestep_mult;
+};
+
+/**
+ * @brief Computes the time-step in a Hernquist potential based on a
+ *        fraction of the circular orbital time
+ *
+ * @param time The current time.
+ * @param potential The #external_potential used in the run.
+ * @param phys_const The physical constants in internal units.
+ * @param g Pointer to the g-particle data.
+ */
+__attribute__((always_inline)) INLINE static float external_gravity_timestep(
+    double time, const struct external_potential* restrict potential,
+    const struct phys_const* restrict phys_const,
+    const struct gpart* restrict g) {
+
+  const float G_newton = phys_const->const_newton_G;
+
+  /* Calculate the relative potential with respect to the centre of the
+   * potential */
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+
+  /* calculate the radius  */
+  const float r = sqrtf(dx * dx + dy * dy + dz * dz + potential->epsilon2);
+  const float sqrtgm_inv = 1.f / sqrtf(G_newton * potential->mass);
+
+  /* Calculate the circular orbital period */
+  const float period = 2.f * M_PI * sqrtf(r) * potential->al *
+                       (1 + r / potential->al) * sqrtgm_inv;
+
+  /* Time-step as a fraction of the circular orbital time */
+  const float time_step = potential->timestep_mult * period;
+
+  return max(time_step, potential->mintime);
+}
+
+/**
+ * @brief Computes the gravitational acceleration from an Hernquist potential.
+ *
+ * Note that the accelerations are multiplied by Newton's G constant
+ * later on.
+ *
+ * a_x = - GM / (a+r)^2 * x/r
+ * a_y = - GM / (a+r)^2 * y/r
+ * a_z = - GM / (a+r)^2 * z/r
+ *
+ * @param time The current time.
+ * @param potential The #external_potential used in the run.
+ * @param phys_const The physical constants in internal units.
+ * @param g Pointer to the g-particle data.
+ */
+__attribute__((always_inline)) INLINE static void external_gravity_acceleration(
+    double time, const struct external_potential* potential,
+    const struct phys_const* const phys_const, struct gpart* g) {
+
+  /* Determine the position relative to the centre of the potential */
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+
+  /* Calculate the acceleration */
+  const float r = sqrtf(dx * dx + dy * dy + dz * dz + potential->epsilon2);
+  const float r_plus_a_inv = 1.f / (r + potential->al);
+  const float r_plus_a_inv2 = r_plus_a_inv * r_plus_a_inv;
+  const float term = -potential->mass * r_plus_a_inv2 / r;
+
+  g->a_grav[0] += term * dx;
+  g->a_grav[1] += term * dy;
+  g->a_grav[2] += term * dz;
+}
+
+/**
+ * @brief Computes the gravitational potential energy of a particle in an
+ * Hernquist potential.
+ *
+ * phi = - GM/(r+a)
+ *
+ * @param time The current time (unused here).
+ * @param potential The #external_potential used in the run.
+ * @param phys_const Physical constants in internal units.
+ * @param g Pointer to the particle data.
+ */
+__attribute__((always_inline)) INLINE static float
+external_gravity_get_potential_energy(
+    double time, const struct external_potential* potential,
+    const struct phys_const* const phys_const, const struct gpart* g) {
+
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+  const float r = sqrtf(dx * dx + dy * dy + dz * dz);
+  const float r_plus_alinv = 1.f / (r + potential->al);
+  return -phys_const->const_newton_G * potential->mass * r_plus_alinv;
+}
+
+/**
+ * @brief Initialises the external potential properties in the internal system
+ * of units.
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us The current internal system of units
+ * @param potential The external potential properties to initialize
+ */
+static INLINE void potential_init_backend(
+    struct swift_params* parameter_file, const struct phys_const* phys_const,
+    const struct unit_system* us, const struct space* s,
+    struct external_potential* potential) {
+
+  /* Define the default value */
+  static const int idealized_disk_default = 0;
+  static const double M200_default = 0.;
+  static const double V200_default = 0.;
+  static const double R200_default = 0.;
+
+  /* Read in the position of the centre of potential */
+  parser_get_param_double_array(parameter_file, "HernquistPotential:position",
+                                3, potential->x);
+
+  /* Is the position absolute or relative to the centre of the box? */
+  const int useabspos =
+      parser_get_param_int(parameter_file, "HernquistPotential:useabspos");
+
+  if (!useabspos) {
+    potential->x[0] += s->dim[0] / 2.;
+    potential->x[1] += s->dim[1] / 2.;
+    potential->x[2] += s->dim[2] / 2.;
+  }
+
+  /* check whether we use the more advanced idealized disk setting */
+  const int usedisk = parser_get_opt_param_int(
+      parameter_file, "HernquistPotential:idealizeddisk",
+      idealized_disk_default);
+
+  if (!usedisk) {
+    /* Read the parameters of the model in the case of the simple
+     * potential form \f$ \Phi = - \frac{GM}{r+a} \f$ */
+    potential->mass =
+        parser_get_param_double(parameter_file, "HernquistPotential:mass");
+    potential->al = parser_get_param_double(parameter_file,
+                                            "HernquistPotential:scalelength");
+  } else {
+
+    /* Read the parameters in the case of a idealized disk
+     * There are 3 different possible input parameters M200, V200 and R200
+     * First read in the mandatory parameters in this case */
+
+    const float G_newton = phys_const->const_newton_G;
+    const float kmoversoverMpc = phys_const->const_reduced_hubble;
+
+    /* Initialize the variables */
+    double M200 = parser_get_opt_param_double(
+        parameter_file, "HernquistPotential:M200", M200_default);
+    double V200 = parser_get_opt_param_double(
+        parameter_file, "HernquistPotential:V200", V200_default);
+    double R200 = parser_get_opt_param_double(
+        parameter_file, "HernquistPotential:R200", R200_default);
+    const double h =
+        parser_get_param_double(parameter_file, "HernquistPotential:h");
+
+    /* Hubble constant assumed for halo masses conversion */
+    const double H0 = h * kmoversoverMpc;
+
+    /* There are 3 legit runs possible with use disk,
+     * with a known M200, V200 or R200 */
+    if (M200 != 0.0) {
+      /* Calculate V200 and R200 from M200 */
+      V200 = cbrt(10. * M200 * G_newton * H0);
+      R200 = V200 / (10 * H0);
+
+    } else if (V200 != 0.0) {
+
+      /* Calculate M200 and R200 from V200 */
+      M200 = V200 * V200 * V200 / (10. * G_newton * H0);
+      R200 = V200 / (10 * H0);
+    } else if (R200 != 0.0) {
+
+      /* Calculate M200 and V200 from R200 */
+      V200 = 10. * H0 * R200;
+      M200 = V200 * V200 * V200 / (10. * G_newton * H0);
+    } else {
+      error("Please specify one of the 3 variables M200, V200 or R200");
+    }
+
+    /* message("M200 = %g, R200 = %g, V200 = %g", M200, R200, V200); */
+    /* message("H0 = %g", H0); */
+
+    /* get the concentration from the parameter file */
+    const double concentration = parser_get_param_double(
+        parameter_file, "HernquistPotential:concentration");
+
+    /* Calculate the Scale radius using the NFW definition */
+    const double RS = R200 / concentration;
+
+    /* Calculate the Hernquist equivalent scale length */
+    potential->al = RS * sqrt(1. * (log(1. + concentration) -
+                                    concentration / (1. + concentration)));
+
+    /* Depending on the disk mass and the bulge mass the halo
+     * gets a different mass, because of this we read the fractions
+     * from the parameter file and calculate the absolute mass */
+    const double diskfraction = parser_get_param_double(
+        parameter_file, "HernquistPotential:diskfraction");
+    const double bulgefraction = parser_get_param_double(
+        parameter_file, "HernquistPotential:bulgefraction");
+    /* Calculate the mass of the bulge and disk from the parameters  */
+    const double Mdisk = M200 * diskfraction;
+    const double Mbulge = M200 * bulgefraction;
+
+    /* Store the mass of the DM halo */
+    potential->mass = M200 - Mdisk - Mbulge;
+  }
+
+  /* Retrieve the timestep and softening of the potential */
+  potential->timestep_mult = parser_get_param_float(
+      parameter_file, "HernquistPotential:timestep_mult");
+  const float epsilon =
+      parser_get_param_double(parameter_file, "HernquistPotential:epsilon");
+  potential->epsilon2 = epsilon * epsilon;
+
+  /* Compute the minimal time-step. */
+  /* This is the circular orbital time at the softened radius */
+  const float sqrtgm = sqrtf(phys_const->const_newton_G * potential->mass);
+  potential->mintime = 2.f * sqrtf(epsilon) * potential->al * M_PI *
+                       (1. + epsilon / potential->al) / sqrtgm *
+                       potential->timestep_mult;
+}
+
+/**
+ * @brief prints the properties of the external potential to stdout.
+ *
+ * @param  potential the external potential properties.
+ */
+static inline void potential_print_backend(
+    const struct external_potential* potential) {
+
+  message(
+      "external potential is 'hernquist' with properties are (x,y,z) = (%e, "
+      "%e, %e), mass = %e "
+      "scale length = %e , minimum time = %e "
+      "timestep multiplier = %e",
+      potential->x[0], potential->x[1], potential->x[2], potential->mass,
+      potential->al, potential->mintime, potential->timestep_mult);
+}
+
+#endif /* SWIFT_POTENTIAL_HERNQUIST_H */
diff --git a/src/potential/isothermal/potential.h b/src/potential/isothermal/potential.h
index b5f8d7c39738bfe1895c73e6e59ae1279c0f74fa..160372210e41036f2737c10a4aa3d2ddac1077f2 100644
--- a/src/potential/isothermal/potential.h
+++ b/src/potential/isothermal/potential.h
@@ -148,7 +148,7 @@ external_gravity_get_potential_energy(
   const float dy = g->x[1] - potential->x[1];
   const float dz = g->x[2] - potential->x[2];
 
-  return -0.5f * potential->vrot * potential->vrot *
+  return 0.5f * potential->vrot * potential->vrot *
          logf(dx * dx + dy * dy + dz * dz + potential->epsilon2);
 }
 
@@ -166,11 +166,19 @@ static INLINE void potential_init_backend(
     const struct unit_system* us, const struct space* s,
     struct external_potential* potential) {
 
+  /* Read in the position of the centre of potential */
   parser_get_param_double_array(parameter_file, "IsothermalPotential:position",
                                 3, potential->x);
-  potential->x[0] += s->dim[0] / 2.;
-  potential->x[1] += s->dim[1] / 2.;
-  potential->x[2] += s->dim[2] / 2.;
+
+  /* Is the position absolute or relative to the centre of the box? */
+  const int useabspos =
+      parser_get_param_int(parameter_file, "IsothermalPotential:useabspos");
+
+  if (!useabspos) {
+    potential->x[0] += s->dim[0] / 2.;
+    potential->x[1] += s->dim[1] / 2.;
+    potential->x[2] += s->dim[2] / 2.;
+  }
 
   potential->vrot =
       parser_get_param_double(parameter_file, "IsothermalPotential:vrot");
diff --git a/src/potential/nfw/potential.h b/src/potential/nfw/potential.h
new file mode 100644
index 0000000000000000000000000000000000000000..28bafd439a36a41f2feecdc7169f8628fbed47f4
--- /dev/null
+++ b/src/potential/nfw/potential.h
@@ -0,0 +1,260 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018   Ashley Kelly ()
+ *                      Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_POTENTIAL_NFW_H
+#define SWIFT_POTENTIAL_NFW_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <float.h>
+#include <math.h>
+
+/* Local includes. */
+#include "error.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "space.h"
+#include "units.h"
+
+/**
+ * @brief External Potential Properties - NFW Potential
+                rho(r) = rho_0 / ( (r/R_s)*(1+r/R_s)^2 )
+
+        We however parameterise this in terms of c and virial_mass
+ */
+struct external_potential {
+
+  /*! Position of the centre of potential */
+  double x[3];
+
+  /*! The scale radius of the NFW potential */
+  double r_s;
+
+  /*! The pre-factor \f$ 4 \pi \rho_0 r_s^3 \f$ (G is multiplied in later) */
+  double pre_factor;
+
+  /*! The critical density of the universe */
+  double rho_c;
+
+  /*! The concentration parameter */
+  double c_200;
+
+  /*! The virial mass */
+  double M_200;
+
+  /*! Time-step condition pre_factor, this factor is used to multiply times the
+   * orbital time, so in the case of 0.01 we take 1% of the orbital time as
+   * the time integration steps */
+  double timestep_mult;
+
+  /*! Minimum time step based on the orbital time at the softening times
+   * the timestep_mult */
+  double mintime;
+
+  /*! Common log term \f$ \ln(1+c_{200}) - \frac{c_{200}}{1 + c_{200}} \f$ */
+  double log_c200_term;
+
+  /*! Softening length */
+  double eps;
+};
+
+/**
+ * @brief Computes the time-step due to the acceleration from the NFW potential
+ *        as a fraction (timestep_mult) of the circular orbital time of that
+ *        particle.
+ *
+ * @param time The current time.
+ * @param potential The #external_potential used in the run.
+ * @param phys_const The physical constants in internal units.
+ * @param g Pointer to the g-particle data.
+ */
+__attribute__((always_inline)) INLINE static float external_gravity_timestep(
+    double time, const struct external_potential* restrict potential,
+    const struct phys_const* restrict phys_const,
+    const struct gpart* restrict g) {
+
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+
+  const float r =
+      sqrtf(dx * dx + dy * dy + dz * dz + potential->eps * potential->eps);
+
+  const float mr = potential->M_200 *
+                   (logf(1.f + r / potential->r_s) - r / (r + potential->r_s)) /
+                   potential->log_c200_term;
+
+  const float period =
+      2 * M_PI * r * sqrtf(r / (phys_const->const_newton_G * mr));
+
+  /* Time-step as a fraction of the circular period */
+  const float time_step = potential->timestep_mult * period;
+
+  return max(time_step, potential->mintime);
+}
+
+/**
+ * @brief Computes the gravitational acceleration from an NFW Halo potential.
+ *
+ * Note that the accelerations are multiplied by Newton's G constant
+ * later on.
+ *
+ * a_x = 4 pi \rho_0 r_s^3 ( 1/((r+rs)*r^2) - log(1+r/rs)/r^3) * x
+ * a_y = 4 pi \rho_0 r_s^3 ( 1/((r+rs)*r^2) - log(1+r/rs)/r^3) * y
+ * a_z = 4 pi \rho_0 r_s^3 ( 1/((r+rs)*r^2) - log(1+r/rs)/r^3) * z
+ *
+ * @param time The current time.
+ * @param potential The #external_potential used in the run.
+ * @param phys_const The physical constants in internal units.
+ * @param g Pointer to the g-particle data.
+ */
+__attribute__((always_inline)) INLINE static void external_gravity_acceleration(
+    double time, const struct external_potential* restrict potential,
+    const struct phys_const* restrict phys_const, struct gpart* restrict g) {
+
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+
+  const float r =
+      sqrtf(dx * dx + dy * dy + dz * dz + potential->eps * potential->eps);
+  const float term1 = potential->pre_factor;
+  const float term2 = (1.0f / ((r + potential->r_s) * r * r) -
+                       logf(1.0f + r / potential->r_s) / (r * r * r));
+
+  g->a_grav[0] += term1 * term2 * dx;
+  g->a_grav[1] += term1 * term2 * dy;
+  g->a_grav[2] += term1 * term2 * dz;
+}
+
+/**
+ * @brief Computes the gravitational potential energy of a particle in an
+ * NFW potential.
+ *
+ * phi = -4 * pi * G * rho_0 * r_s^3 * ln(1+r/r_s) / r
+ *
+ * @param time The current time (unused here).
+ * @param potential The #external_potential used in the run.
+ * @param phys_const Physical constants in internal units.
+ * @param g Pointer to the particle data.
+ */
+__attribute__((always_inline)) INLINE static float
+external_gravity_get_potential_energy(
+    double time, const struct external_potential* potential,
+    const struct phys_const* const phys_const, const struct gpart* g) {
+
+  const float dx = g->x[0] - potential->x[0];
+  const float dy = g->x[1] - potential->x[1];
+  const float dz = g->x[2] - potential->x[2];
+
+  const float r =
+      sqrtf(dx * dx + dy * dy + dz * dz + potential->eps * potential->eps);
+  const float term1 = -potential->pre_factor / r;
+  const float term2 = logf(1.0f + r / potential->r_s);
+
+  return term1 * term2;
+}
+
+/**
+ * @brief Initialises the external potential properties in the internal system
+ * of units.
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us The current internal system of units
+ * @param potential The external potential properties to initialize
+ */
+static INLINE void potential_init_backend(
+    struct swift_params* parameter_file, const struct phys_const* phys_const,
+    const struct unit_system* us, const struct space* s,
+    struct external_potential* potential) {
+
+  /* Read in the position of the centre of potential */
+  parser_get_param_double_array(parameter_file, "NFWPotential:position", 3,
+                                potential->x);
+
+  /* Is the position absolute or relative to the centre of the box? */
+  const int useabspos =
+      parser_get_param_int(parameter_file, "NFWPotential:useabspos");
+
+  if (!useabspos) {
+    potential->x[0] += s->dim[0] / 2.;
+    potential->x[1] += s->dim[1] / 2.;
+    potential->x[2] += s->dim[2] / 2.;
+  }
+
+  /* Read the other parameters of the model */
+  potential->timestep_mult =
+      parser_get_param_double(parameter_file, "NFWPotential:timestep_mult");
+  potential->c_200 =
+      parser_get_param_double(parameter_file, "NFWPotential:concentration");
+  potential->M_200 =
+      parser_get_param_double(parameter_file, "NFWPotential:M_200");
+  potential->rho_c =
+      parser_get_param_double(parameter_file, "NFWPotential:critical_density");
+  potential->eps = 0.05;
+
+  /* Compute R_200 */
+  const double R_200 =
+      cbrtf(3.0 * potential->M_200 / (4. * M_PI * 200.0 * potential->rho_c));
+
+  /* NFW scale-radius */
+  potential->r_s = R_200 / potential->c_200;
+  const double r_s3 = potential->r_s * potential->r_s * potential->r_s;
+
+  /* Log(c_200) term appearing in many expressions */
+  potential->log_c200_term =
+      log(1. + potential->c_200) - potential->c_200 / (1. + potential->c_200);
+
+  const double rho_0 =
+      potential->M_200 / (4.f * M_PI * r_s3 * potential->log_c200_term);
+
+  /* Pre-factor for the accelerations (note G is multiplied in later on) */
+  potential->pre_factor = 4.0f * M_PI * rho_0 * r_s3;
+
+  /* Compute the orbital time at the softening radius */
+  const double sqrtgm = sqrt(phys_const->const_newton_G * potential->M_200);
+  const double epslnthing = log(1.f + potential->eps / potential->r_s) -
+                            potential->eps / (potential->eps + potential->r_s);
+
+  potential->mintime = 2. * M_PI * potential->eps * sqrtf(potential->eps) *
+                       sqrtf(potential->log_c200_term / epslnthing) / sqrtgm *
+                       potential->timestep_mult;
+}
+
+/**
+ * @brief Prints the properties of the external potential to stdout.
+ *
+ * @param  potential The external potential properties.
+ */
+static INLINE void potential_print_backend(
+    const struct external_potential* potential) {
+
+  message(
+      "External potential is 'NFW' with properties are (x,y,z) = (%e, "
+      "%e, %e), scale radius = %e "
+      "timestep multiplier = %e, mintime = %e",
+      potential->x[0], potential->x[1], potential->x[2], potential->r_s,
+      potential->timestep_mult, potential->mintime);
+}
+
+#endif /* SWIFT_POTENTIAL_NFW_H */
diff --git a/src/potential/point_mass/potential.h b/src/potential/point_mass/potential.h
index f9d56a1ff165f2331c91ea828b5ffe0e0db76c2f..5ae03f8637708d75800a6a7fb283b98bdb42cec2 100644
--- a/src/potential/point_mass/potential.h
+++ b/src/potential/point_mass/potential.h
@@ -137,7 +137,7 @@ external_gravity_get_potential_energy(
   const float dx = g->x[0] - potential->x[0];
   const float dy = g->x[1] - potential->x[1];
   const float dz = g->x[2] - potential->x[2];
-  const float rinv = 1. / sqrtf(dx * dx + dy * dy + dz * dz);
+  const float rinv = 1.f / sqrtf(dx * dx + dy * dy + dz * dz);
   return -phys_const->const_newton_G * potential->mass * rinv;
 }
 
@@ -156,8 +156,21 @@ static INLINE void potential_init_backend(
     const struct unit_system* us, const struct space* s,
     struct external_potential* potential) {
 
+  /* Read in the position of the centre of potential */
   parser_get_param_double_array(parameter_file, "PointMassPotential:position",
                                 3, potential->x);
+
+  /* Is the position absolute or relative to the centre of the box? */
+  const int useabspos =
+      parser_get_param_int(parameter_file, "PointMassPotential:useabspos");
+
+  if (!useabspos) {
+    potential->x[0] += s->dim[0] / 2.;
+    potential->x[1] += s->dim[1] / 2.;
+    potential->x[2] += s->dim[2] / 2.;
+  }
+
+  /* Read the other parameters of the model */
   potential->mass =
       parser_get_param_double(parameter_file, "PointMassPotential:mass");
   potential->timestep_mult = parser_get_param_float(
diff --git a/src/potential/point_mass_softened/potential.h b/src/potential/point_mass_softened/potential.h
index 0e35e7bb9870c7954b47316a3cc30bb68cde5fc4..050bc1a00c98da4c350e59cf1ef8ef855094e552 100644
--- a/src/potential/point_mass_softened/potential.h
+++ b/src/potential/point_mass_softened/potential.h
@@ -183,8 +183,21 @@ static INLINE void potential_init_backend(
     const struct unit_system* us, const struct space* s,
     struct external_potential* potential) {
 
+  /* Read in the position of the centre of potential */
   parser_get_param_double_array(parameter_file, "PointMassPotential:position",
                                 3, potential->x);
+
+  /* Is the position absolute or relative to the centre of the box? */
+  const int useabspos =
+      parser_get_param_int(parameter_file, "PointMassPotential:useabspos");
+
+  if (!useabspos) {
+    potential->x[0] += s->dim[0] / 2.;
+    potential->x[1] += s->dim[1] / 2.;
+    potential->x[2] += s->dim[2] / 2.;
+  }
+
+  /* Read the other parameters of the model */
   potential->mass =
       parser_get_param_double(parameter_file, "PointMassPotential:mass");
   potential->timestep_mult = parser_get_param_float(
diff --git a/src/profiler.c b/src/profiler.c
index 58fd279d312d3c752d65ccaceab803ace66fddac..6fed108ea2b1359238ee47d37cd90380086ec6bc 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -21,6 +21,7 @@
 #include "../config.h"
 
 /* Some standard headers. */
+#include <math.h>
 #include <string.h>
 
 /* This object's header. */
diff --git a/src/proxy.c b/src/proxy.c
index 965a8660ce32e1f151bb6c6b3cf944436f39129d..afe59853dea3a8118823b1dc37c5d9c073a63b98 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -39,10 +39,16 @@
 
 /* Local headers. */
 #include "cell.h"
+#include "engine.h"
 #include "error.h"
 #include "memuse.h"
 #include "space.h"
 
+#ifdef WITH_MPI
+/* MPI data type for the communications */
+MPI_Datatype pcell_mpi_type;
+#endif
+
 /**
  * @brief Exchange tags between nodes.
  *
@@ -58,14 +64,16 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
 
 #ifdef WITH_MPI
 
+  ticks tic2 = getticks();
+
   /* Run through the cells and get the size of the tags that will be sent off.
    */
   int count_out = 0;
   int offset_out[s->nr_cells];
   for (int k = 0; k < s->nr_cells; k++) {
     offset_out[k] = count_out;
-    if (s->cells_top[k].sendto) {
-      count_out += s->cells_top[k].pcell_size;
+    if (s->cells_top[k].mpi.sendto) {
+      count_out += s->cells_top[k].mpi.pcell_size;
     }
   }
 
@@ -75,7 +83,7 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
   for (int k = 0; k < num_proxies; k++) {
     for (int j = 0; j < proxies[k].nr_cells_in; j++) {
       offset_in[proxies[k].cells_in[j] - s->cells_top] = count_in;
-      count_in += proxies[k].cells_in[j]->pcell_size;
+      count_in += proxies[k].cells_in[j]->mpi.pcell_size;
     }
   }
 
@@ -90,11 +98,15 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
 
   /* Pack the local tags. */
   for (int k = 0; k < s->nr_cells; k++) {
-    if (s->cells_top[k].sendto) {
+    if (s->cells_top[k].mpi.sendto) {
       cell_pack_tags(&s->cells_top[k], &tags_out[offset_out[k]]);
     }
   }
 
+  if (s->e->verbose)
+    message("Cell pack tags took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Allocate the incoming and outgoing request handles. */
   int num_reqs_out = 0;
   int num_reqs_in = 0;
@@ -102,8 +114,8 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
     num_reqs_in += proxies[k].nr_cells_in;
     num_reqs_out += proxies[k].nr_cells_out;
   }
-  MPI_Request *reqs_in;
-  int *cids_in;
+  MPI_Request *reqs_in = NULL;
+  int *cids_in = NULL;
   if ((reqs_in = (MPI_Request *)malloc(sizeof(MPI_Request) *
                                        (num_reqs_in + num_reqs_out))) == NULL ||
       (cids_in = (int *)malloc(sizeof(int) * (num_reqs_in + num_reqs_out))) ==
@@ -118,8 +130,8 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
       const int cid = proxies[k].cells_in[j] - s->cells_top;
       cids_in[recv_rid] = cid;
       int err = MPI_Irecv(
-          &tags_in[offset_in[cid]], proxies[k].cells_in[j]->pcell_size, MPI_INT,
-          proxies[k].nodeID, cid, MPI_COMM_WORLD, &reqs_in[recv_rid]);
+          &tags_in[offset_in[cid]], proxies[k].cells_in[j]->mpi.pcell_size,
+          MPI_INT, proxies[k].nodeID, cid, MPI_COMM_WORLD, &reqs_in[recv_rid]);
       if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv tags.");
       recv_rid += 1;
     }
@@ -127,13 +139,15 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
       const int cid = proxies[k].cells_out[j] - s->cells_top;
       cids_out[send_rid] = cid;
       int err = MPI_Isend(
-          &tags_out[offset_out[cid]], proxies[k].cells_out[j]->pcell_size,
+          &tags_out[offset_out[cid]], proxies[k].cells_out[j]->mpi.pcell_size,
           MPI_INT, proxies[k].nodeID, cid, MPI_COMM_WORLD, &reqs_out[send_rid]);
       if (err != MPI_SUCCESS) mpi_error(err, "Failed to isend tags.");
       send_rid += 1;
     }
   }
 
+  tic2 = getticks();
+
   /* Wait for each recv and unpack the tags into the local cells. */
   for (int k = 0; k < num_reqs_in; k++) {
     int pid = MPI_UNDEFINED;
@@ -145,6 +159,10 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
     cell_unpack_tags(&tags_in[offset_in[cid]], &s->cells_top[cid]);
   }
 
+  if (s->e->verbose)
+    message("Cell unpack tags took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Wait for all the sends to have completed. */
   if (MPI_Waitall(num_reqs_out, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
     error("MPI_Waitall on sends failed.");
@@ -160,15 +178,196 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
 #endif
 }
 
+/**
+ * @brief Exchange cells with a remote node, first part.
+ *
+ * The first part of the transaction sends the local cell count and the packed
+ * #pcell array to the destination node, and enqueues an @c MPI_Irecv for
+ * the foreign cell counts.
+ *
+ * @param p The #proxy.
+ */
+void proxy_cells_exchange_first(struct proxy *p) {
+
+#ifdef WITH_MPI
+
+  /* Get the number of pcells we will need to send. */
+  p->size_pcells_out = 0;
+  for (int k = 0; k < p->nr_cells_out; k++)
+    p->size_pcells_out += p->cells_out[k]->mpi.pcell_size;
+
+  /* Send the number of pcells. */
+  int err = MPI_Isend(&p->size_pcells_out, 1, MPI_INT, p->nodeID,
+                      p->mynodeID * proxy_tag_shift + proxy_tag_count,
+                      MPI_COMM_WORLD, &p->req_cells_count_out);
+  if (err != MPI_SUCCESS) mpi_error(err, "Failed to isend nr of pcells.");
+  // message( "isent pcell count (%i) from node %i to node %i." ,
+  // p->size_pcells_out , p->mynodeID , p->nodeID ); fflush(stdout);
+
+  /* Allocate and fill the pcell buffer. */
+  if (p->pcells_out != NULL) free(p->pcells_out);
+  if (posix_memalign((void **)&p->pcells_out, SWIFT_STRUCT_ALIGNMENT,
+                     sizeof(struct pcell) * p->size_pcells_out) != 0)
+    error("Failed to allocate pcell_out buffer.");
+  for (int ind = 0, k = 0; k < p->nr_cells_out; k++) {
+    memcpy(&p->pcells_out[ind], p->cells_out[k]->mpi.pcell,
+           sizeof(struct pcell) * p->cells_out[k]->mpi.pcell_size);
+    ind += p->cells_out[k]->mpi.pcell_size;
+  }
+
+  /* Send the pcell buffer. */
+  err = MPI_Isend(p->pcells_out, p->size_pcells_out, pcell_mpi_type, p->nodeID,
+                  p->mynodeID * proxy_tag_shift + proxy_tag_cells,
+                  MPI_COMM_WORLD, &p->req_cells_out);
+
+  if (err != MPI_SUCCESS) mpi_error(err, "Failed to pcell_out buffer.");
+  // message( "isent pcells (%i) from node %i to node %i." , p->size_pcells_out
+  // , p->mynodeID , p->nodeID ); fflush(stdout);
+
+  /* Receive the number of pcells. */
+  err = MPI_Irecv(&p->size_pcells_in, 1, MPI_INT, p->nodeID,
+                  p->nodeID * proxy_tag_shift + proxy_tag_count, MPI_COMM_WORLD,
+                  &p->req_cells_count_in);
+  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv nr of pcells.");
+    // message( "irecv pcells count on node %i from node %i." , p->mynodeID ,
+    // p->nodeID ); fflush(stdout);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+/**
+ * @brief Exchange cells with a remote node, second part.
+ *
+ * Once the incoming cell count has been received, allocate a buffer
+ * for the foreign packed #pcell array and emit the @c MPI_Irecv for
+ * it.
+ *
+ * @param p The #proxy.
+ */
+void proxy_cells_exchange_second(struct proxy *p) {
+
+#ifdef WITH_MPI
+
+  /* Re-allocate the pcell_in buffer. */
+  if (p->pcells_in != NULL) free(p->pcells_in);
+  if (posix_memalign((void **)&p->pcells_in, SWIFT_STRUCT_ALIGNMENT,
+                     sizeof(struct pcell) * p->size_pcells_in) != 0)
+    error("Failed to allocate pcell_in buffer.");
+
+  /* Receive the particle buffers. */
+  int err = MPI_Irecv(p->pcells_in, p->size_pcells_in, pcell_mpi_type,
+                      p->nodeID, p->nodeID * proxy_tag_shift + proxy_tag_cells,
+                      MPI_COMM_WORLD, &p->req_cells_in);
+
+  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv part data.");
+    // message( "irecv pcells (%i) on node %i from node %i." , p->size_pcells_in
+    // , p->mynodeID , p->nodeID ); fflush(stdout);
+
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
+
+#ifdef WITH_MPI
+
+void proxy_cells_count_mapper(void *map_data, int num_elements,
+                              void *extra_data) {
+  struct cell *cells = (struct cell *)map_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    if (cells[k].mpi.sendto) cells[k].mpi.pcell_size = cell_getsize(&cells[k]);
+  }
+}
+
+struct pack_mapper_data {
+  struct space *s;
+  int *offset;
+  struct pcell *pcells;
+  int with_gravity;
+};
+
+void proxy_cells_pack_mapper(void *map_data, int num_elements,
+                             void *extra_data) {
+  struct cell *cells = (struct cell *)map_data;
+  struct pack_mapper_data *data = (struct pack_mapper_data *)extra_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    if (cells[k].mpi.sendto) {
+      ptrdiff_t ind = &cells[k] - data->s->cells_top;
+      cells[k].mpi.pcell = &data->pcells[data->offset[ind]];
+      cell_pack(&cells[k], cells[k].mpi.pcell, data->with_gravity);
+    }
+  }
+}
+
+void proxy_cells_exchange_first_mapper(void *map_data, int num_elements,
+                                       void *extra_data) {
+  struct proxy *proxies = (struct proxy *)map_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    proxy_cells_exchange_first(&proxies[k]);
+  }
+}
+
+struct wait_and_unpack_mapper_data {
+  struct space *s;
+  int num_proxies;
+  MPI_Request *reqs_in;
+  struct proxy *proxies;
+  int with_gravity;
+  swift_lock_type lock;
+};
+
+void proxy_cells_wait_and_unpack_mapper(void *unused_map_data, int num_elements,
+                                        void *extra_data) {
+
+  // MATTHIEU: This is currently unused. Scalar (non-threadpool) version is
+  // faster but we still need to explore why this happens.
+
+  struct wait_and_unpack_mapper_data *data =
+      (struct wait_and_unpack_mapper_data *)extra_data;
+
+  for (int k = 0; k < num_elements; k++) {
+    int pid = MPI_UNDEFINED;
+    MPI_Status status;
+    int res;
+
+    /* We need a lock to prevent concurrent calls to MPI_Waitany on
+       the same array of requests since this is not supported in the MPI
+       standard (v3.1). This is not really a problem since the threads
+       would block inside MPI_Waitany anyway. */
+    lock_lock(&data->lock);
+    if ((res = MPI_Waitany(data->num_proxies, data->reqs_in, &pid, &status)) !=
+            MPI_SUCCESS ||
+        pid == MPI_UNDEFINED)
+      mpi_error(res, "MPI_Waitany failed.");
+    if (lock_unlock(&data->lock) != 0) {
+      error("Failed to release lock.");
+    }
+
+    // message( "cell data from proxy %i has arrived." , pid );
+    for (int count = 0, j = 0; j < data->proxies[pid].nr_cells_in; j++)
+      count += cell_unpack(&data->proxies[pid].pcells_in[count],
+                           data->proxies[pid].cells_in[j], data->s,
+                           data->with_gravity);
+  }
+}
+
+#endif  // WITH_MPI
+
 /**
  * @brief Exchange the cell structures with all proxies.
  *
  * @param proxies The list of #proxy that will send/recv cells.
  * @param num_proxies The number of proxies.
  * @param s The space into which the particles will be unpacked.
+ * @param with_gravity Are we running with gravity and hence need
+ *      to exchange multipoles?
  */
 void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
-                          struct space *s) {
+                          struct space *s, const int with_gravity) {
 
 #ifdef WITH_MPI
 
@@ -179,33 +378,46 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
   MPI_Request *reqs_in = reqs;
   MPI_Request *reqs_out = &reqs[num_proxies];
 
+  ticks tic2 = getticks();
+
   /* Run through the cells and get the size of the ones that will be sent off.
    */
+  threadpool_map(&s->e->threadpool, proxy_cells_count_mapper, s->cells_top,
+                 s->nr_cells, sizeof(struct cell), /*chunk=*/0,
+                 /*extra_data=*/NULL);
   int count_out = 0;
   int offset[s->nr_cells];
   for (int k = 0; k < s->nr_cells; k++) {
     offset[k] = count_out;
-    if (s->cells_top[k].sendto)
-      count_out +=
-          (s->cells_top[k].pcell_size = cell_getsize(&s->cells_top[k]));
+    if (s->cells_top[k].mpi.sendto) count_out += s->cells_top[k].mpi.pcell_size;
   }
 
+  if (s->e->verbose)
+    message("Counting cells to send took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Allocate the pcells. */
   struct pcell *pcells = NULL;
   if (posix_memalign((void **)&pcells, SWIFT_CACHE_ALIGNMENT,
                      sizeof(struct pcell) * count_out) != 0)
     error("Failed to allocate pcell buffer.");
 
+  tic2 = getticks();
+
   /* Pack the cells. */
-  for (int k = 0; k < s->nr_cells; k++)
-    if (s->cells_top[k].sendto) {
-      cell_pack(&s->cells_top[k], &pcells[offset[k]]);
-      s->cells_top[k].pcell = &pcells[offset[k]];
-    }
+  struct pack_mapper_data data = {s, offset, pcells, with_gravity};
+  threadpool_map(&s->e->threadpool, proxy_cells_pack_mapper, s->cells_top,
+                 s->nr_cells, sizeof(struct cell), /*chunk=*/0, &data);
+
+  if (s->e->verbose)
+    message("Packing cells took %.3f %s.", clocks_from_ticks(getticks() - tic2),
+            clocks_getunit());
 
   /* Launch the first part of the exchange. */
+  threadpool_map(&s->e->threadpool, proxy_cells_exchange_first_mapper, proxies,
+                 num_proxies, sizeof(struct proxy), /*chunk=*/0,
+                 /*extra_data=*/NULL);
   for (int k = 0; k < num_proxies; k++) {
-    proxy_cells_exchange_first(&proxies[k]);
     reqs_in[k] = proxies[k].req_cells_count_in;
     reqs_out[k] = proxies[k].req_cells_count_out;
   }
@@ -231,6 +443,8 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
     reqs_out[k] = proxies[k].req_cells_out;
   }
 
+  tic2 = getticks();
+
   /* Wait for each pcell array to come in from the proxies. */
   for (int k = 0; k < num_proxies; k++) {
     int pid = MPI_UNDEFINED;
@@ -241,9 +455,13 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
     // message( "cell data from proxy %i has arrived." , pid );
     for (int count = 0, j = 0; j < proxies[pid].nr_cells_in; j++)
       count += cell_unpack(&proxies[pid].pcells_in[count],
-                           proxies[pid].cells_in[j], s);
+                           proxies[pid].cells_in[j], s, with_gravity);
   }
 
+  if (s->e->verbose)
+    message("Un-packing cells took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Wait for all the sends to have finished too. */
   if (MPI_Waitall(num_proxies, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
     error("MPI_Waitall on sends failed.");
@@ -258,103 +476,9 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
 }
 
 /**
- * @brief Exchange cells with a remote node, first part.
- *
- * The first part of the transaction sends the local cell count and the packed
- * #pcell array to the destination node, and enqueues an @c MPI_Irecv for
- * the foreign cell counts.
- *
- * @param p The #proxy.
- */
-void proxy_cells_exchange_first(struct proxy *p) {
-
-#ifdef WITH_MPI
-
-  /* Get the number of pcells we will need to send. */
-  p->size_pcells_out = 0;
-  for (int k = 0; k < p->nr_cells_out; k++)
-    p->size_pcells_out += p->cells_out[k]->pcell_size;
-
-  /* Send the number of pcells. */
-  int err = MPI_Isend(&p->size_pcells_out, 1, MPI_INT, p->nodeID,
-                      p->mynodeID * proxy_tag_shift + proxy_tag_count,
-                      MPI_COMM_WORLD, &p->req_cells_count_out);
-  if (err != MPI_SUCCESS) mpi_error(err, "Failed to isend nr of pcells.");
-  // message( "isent pcell count (%i) from node %i to node %i." ,
-  // p->size_pcells_out , p->mynodeID , p->nodeID ); fflush(stdout);
-
-  /* Allocate and fill the pcell buffer. */
-  if (p->pcells_out != NULL) free(p->pcells_out);
-  if (posix_memalign((void **)&p->pcells_out, SWIFT_STRUCT_ALIGNMENT,
-                     sizeof(struct pcell) * p->size_pcells_out) != 0)
-    error("Failed to allocate pcell_out buffer.");
   memuse_report("pcells_out", sizeof(struct pcell) * p->size_pcells_out);
 
-  for (int ind = 0, k = 0; k < p->nr_cells_out; k++) {
-    memcpy(&p->pcells_out[ind], p->cells_out[k]->pcell,
-           sizeof(struct pcell) * p->cells_out[k]->pcell_size);
-    ind += p->cells_out[k]->pcell_size;
-  }
-
-  /* Send the pcell buffer. */
-  err = MPI_Isend(p->pcells_out, sizeof(struct pcell) * p->size_pcells_out,
-                  MPI_BYTE, p->nodeID,
-                  p->mynodeID * proxy_tag_shift + proxy_tag_cells,
-                  MPI_COMM_WORLD, &p->req_cells_out);
-
-  if (err != MPI_SUCCESS) mpi_error(err, "Failed to pcell_out buffer.");
-  // message( "isent pcells (%i) from node %i to node %i." , p->size_pcells_out
-  // , p->mynodeID , p->nodeID ); fflush(stdout);
-
-  /* Receive the number of pcells. */
-  err = MPI_Irecv(&p->size_pcells_in, 1, MPI_INT, p->nodeID,
-                  p->nodeID * proxy_tag_shift + proxy_tag_count, MPI_COMM_WORLD,
-                  &p->req_cells_count_in);
-  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv nr of pcells.");
-    // message( "irecv pcells count on node %i from node %i." , p->mynodeID ,
-    // p->nodeID ); fflush(stdout);
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
- * @brief Exchange cells with a remote node, second part.
- *
- * Once the incomming cell count has been received, allocate a buffer
- * for the foreign packed #pcell array and emit the @c MPI_Irecv for
- * it.
- *
- * @param p The #proxy.
- */
-void proxy_cells_exchange_second(struct proxy *p) {
-
-#ifdef WITH_MPI
-
-  /* Re-allocate the pcell_in buffer. */
-  if (p->pcells_in != NULL) free(p->pcells_in);
-  if (posix_memalign((void **)&p->pcells_in, SWIFT_STRUCT_ALIGNMENT,
-                     sizeof(struct pcell) * p->size_pcells_in) != 0)
-    error("Failed to allocate pcell_in buffer.");
   memuse_report("pcells_in", sizeof(struct pcell) * p->size_pcells_in);
-
-  /* Receive the particle buffers. */
-  int err = MPI_Irecv(p->pcells_in, sizeof(struct pcell) * p->size_pcells_in,
-                      MPI_BYTE, p->nodeID,
-                      p->nodeID * proxy_tag_shift + proxy_tag_cells,
-                      MPI_COMM_WORLD, &p->req_cells_in);
-
-  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv part data.");
-    // message( "irecv pcells (%i) on node %i from node %i." , p->size_pcells_in
-    // , p->mynodeID , p->nodeID ); fflush(stdout);
-
-#else
-  error("SWIFT was not compiled with MPI support.");
-#endif
-}
-
-/**
  * @brief Add a cell to the given proxy's input list.
  *
  * @param p The #proxy.
@@ -773,3 +897,19 @@ void proxy_init(struct proxy *p, int mynodeID, int nodeID) {
   }
   p->nr_sparts_out = 0;
 }
+
+/**
+ * @brief Registers the MPI types for the proxy cells.
+ */
+void proxy_create_mpi_type(void) {
+
+#ifdef WITH_MPI
+  if (MPI_Type_contiguous(sizeof(struct pcell) / sizeof(unsigned char),
+                          MPI_BYTE, &pcell_mpi_type) != MPI_SUCCESS ||
+      MPI_Type_commit(&pcell_mpi_type) != MPI_SUCCESS) {
+    error("Failed to create MPI type for parts.");
+  }
+#else
+  error("SWIFT was not compiled with MPI support.");
+#endif
+}
diff --git a/src/proxy.h b/src/proxy.h
index 63f51846d0bfb646e1f2f9209413437b4c966983..2e3f350333d9e6fdb09161f852cf3a143c60e7ce 100644
--- a/src/proxy.h
+++ b/src/proxy.h
@@ -102,10 +102,9 @@ void proxy_parts_exchange_second(struct proxy *p);
 void proxy_addcell_in(struct proxy *p, struct cell *c, int type);
 void proxy_addcell_out(struct proxy *p, struct cell *c, int type);
 void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
-                          struct space *s);
-void proxy_cells_exchange_first(struct proxy *p);
-void proxy_cells_exchange_second(struct proxy *p);
+                          struct space *s, int with_gravity);
 void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
                          struct space *s);
+void proxy_create_mpi_type(void);
 
 #endif /* SWIFT_PROXY_H */
diff --git a/src/random.h b/src/random.h
new file mode 100644
index 0000000000000000000000000000000000000000..660ae21db8dc78a8bde78b3f541bff6b621253cd
--- /dev/null
+++ b/src/random.h
@@ -0,0 +1,115 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *               2019 Folkert Nobels    (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *******************************************************************************/
+#ifndef SWIFT_RANDOM_H
+#define SWIFT_RANDOM_H
+
+/* Code configuration */
+#include "../config.h"
+
+/* Standard header */
+#include <stdlib.h>
+
+/**
+ * @brief The categories of random number generated.
+ *
+ * The values of the fields are carefully chosen numbers:
+ * the numbers are very large primes such that the IDs
+ * will not have a prime factorization with this coefficient
+ * this results in a very high period for the random number
+ * generator.
+ * Only change when you know what you are doing, changing
+ * the numbers to bad values will break the random number
+ * generator.
+ * In case new numbers need to be added other possible
+ * numbers could be:
+ * 4947009007, 5947309451, 6977309513
+ */
+enum random_number_type {
+  random_number_star_formation = 0LL,
+  random_number_stellar_feedback = 3947008991LL,
+  random_number_stellar_enrichment = 2936881973LL,
+  random_number_BH_feedback = 1640531371LL
+};
+
+/**
+ * @brief Returns a pseudo-random number in the range [0, 1[.
+ *
+ * We generate numbers that are always reproducible for a given particle ID and
+ * simulation time (on the integer time-line). If more than one number per
+ * time-step per particle is needed, additional randomness can be obtained by
+ * using the type argument.
+ *
+ * @param id The ID of the particle for which to generate a number.
+ * @param ti_current The time (on the time-line) for which to generate a number.
+ * @param type The #random_number_type to generate.
+ * @return a random number in the interval [0, 1.[.
+ */
+INLINE static double random_unit_interval(const long long int id,
+                                          const integertime_t ti_current,
+                                          const enum random_number_type type) {
+
+  /* Range used for the seeds. Best if prime */
+  static const long long seed_range = RAND_MAX;
+  static const double RAND_MAX_inv = 1. / ((double)RAND_MAX);
+  static const long long mwc_number = (1LL << 32) - 1LL;
+
+  /* Calculate the seed */
+  /* WARNING: Only change the math if you really know what you are doing!
+   * The numbers are carefully chosen prime numbers that prevent correlation
+   * with either the current integer time or the particle IDs. The current
+   * method also prevents any correlation between different random number
+   * types.
+   * The calculation overflows on purpose.
+   * 1. The first step is calculating the seed by using a multiply with carry
+   * (MWC) method, this method depends on the type of random number and
+   * this therefore also prevents that there is any correlation between
+   * the different types of random numbers.
+   * 2. After this we use the 64 bit Xorshift method to randomize the seeds
+   * even more.
+   * 3. We calculate a prime multiplication for the id with a quadratic
+   * term.
+   * 4. We calculate the seed by using a Quadratic congruential generator,
+   * in which we use the id part and the current time step bin.
+   */
+  unsigned long long number = ti_current;
+
+  /* Multiply with carry (MWC), (advised values from Numerical Recipes) */
+  number = 4294957665LL * (number & (mwc_number)) + (number >> 32);
+
+  /* 64-bit Xorshift (advised values from Numerical Recipes) */
+  number ^= number << 21;
+  number ^= number >> 35;
+  number ^= number << 4;
+
+  /* Add constant to ID */
+  const unsigned long long idt = id + type;
+
+  /* Nonlinear congruential generator */
+  const unsigned long long idpart =
+      3457LL * idt + 593LL * idt * ti_current + 5417LL * idt * idt;
+  unsigned int seed =
+      (937LL * number + 5171LL * number * number + idpart + 1109LL) %
+      9996361LL % seed_range;
+
+  /* Generate a random number between 0 and 1. */
+  return rand_r(&seed) * RAND_MAX_inv;
+}
+
+#endif /* SWIFT_RANDOM_H */
diff --git a/src/restart.c b/src/restart.c
index c412c8477d9f93e7c085e13c9e3fe72cd0cab9df..54a098413d7a393ac88a7ef5d7300d912c99f845 100644
--- a/src/restart.c
+++ b/src/restart.c
@@ -334,3 +334,17 @@ void restart_remove_previous(const char *filename) {
     }
   }
 }
+
+/**
+ * @brief Run a given command, usually to resubmit a job.
+ *
+ * No check is done on the command being run.
+ *
+ * @param command The command to run in the system's shell.
+ */
+void restart_resubmit(const char *command) {
+
+  /* Let's trust the user's command... */
+  const int result = system(command);
+  if (result != 0) message("Command returned error code %d", result);
+}
diff --git a/src/restart.h b/src/restart.h
index 49d127492255364cbf0f48653c560494e83a2920..b9380201659dacf05fcedad8c9fcb29e7bd89be2 100644
--- a/src/restart.h
+++ b/src/restart.h
@@ -41,4 +41,6 @@ int restart_stop_now(const char *dir, int cleanup);
 void restart_save_previous(const char *filename);
 void restart_remove_previous(const char *filename);
 
+void restart_resubmit(const char *command);
+
 #endif /* SWIFT_RESTART_H */
diff --git a/src/runner.c b/src/runner.c
index 31c9e61e3b1222acdf7a11ce984892a0b23c44e5..316fda132fdb4d041bb9002364cc6d0683f0db06 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -48,23 +48,28 @@
 #include "debug.h"
 #include "drift.h"
 #include "engine.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "gravity.h"
 #include "hydro.h"
 #include "hydro_properties.h"
 #include "kick.h"
+#include "logger.h"
 #include "memuse.h"
 #include "minmax.h"
 #include "runner_doiact_vec.h"
 #include "scheduler.h"
 #include "sort_part.h"
-#include "sourceterms.h"
 #include "space.h"
 #include "space_getsid.h"
+#include "star_formation.h"
+#include "star_formation_iact.h"
 #include "stars.h"
 #include "task.h"
 #include "timers.h"
 #include "timestep.h"
+#include "timestep_limiter.h"
+#include "tracers.h"
 
 #define TASK_LOOP_DENSITY 0
 #define TASK_LOOP_GRADIENT 1
@@ -94,43 +99,317 @@
 #undef FUNCTION
 #undef FUNCTION_TASK_LOOP
 
+/* Import the limiter loop functions. */
+#define FUNCTION limiter
+#define FUNCTION_TASK_LOOP TASK_LOOP_LIMITER
+#include "runner_doiact.h"
+#undef FUNCTION
+#undef FUNCTION_TASK_LOOP
+
 /* Import the gravity loop functions. */
 #include "runner_doiact_grav.h"
 
+/* Import the stars density loop functions. */
+#define FUNCTION density
+#define UPDATE_STARS 1
+#include "runner_doiact_stars.h"
+#undef UPDATE_STARS
+#undef FUNCTION
+
+/* Import the stars feedback loop functions. */
+#define FUNCTION feedback
+#include "runner_doiact_stars.h"
+#undef FUNCTION
+
 /**
- * @brief Perform source terms
+ * @brief Intermediate task after the density to check that the smoothing
+ * lengths are correct.
  *
- * @param r runner task
- * @param c cell
- * @param timer 1 if the time is to be recorded.
+ * @param r The runner thread.
+ * @param c The cell.
+ * @param timer Are we timing this ?
  */
-void runner_do_sourceterms(struct runner *r, struct cell *c, int timer) {
-  const int count = c->count;
-  const double cell_min[3] = {c->loc[0], c->loc[1], c->loc[2]};
-  const double cell_width[3] = {c->width[0], c->width[1], c->width[2]};
-  struct sourceterms *sourceterms = r->e->sourceterms;
-  const int dimen = 3;
+void runner_do_stars_ghost(struct runner *r, struct cell *c, int timer) {
+
+  struct spart *restrict sparts = c->stars.parts;
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const float stars_h_max = e->hydro_properties->h_max;
+  const float stars_h_min = e->hydro_properties->h_min;
+  const float eps = e->stars_properties->h_tolerance;
+  const float stars_eta_dim =
+      pow_dimension(e->stars_properties->eta_neighbours);
+  const int max_smoothing_iter = e->stars_properties->max_smoothing_iterations;
+  int redo = 0, scount = 0;
 
   TIMER_TIC;
 
+  /* Anything to do here? */
+  if (!cell_is_active_stars(c, e)) return;
+
   /* Recurse? */
   if (c->split) {
     for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL) runner_do_sourceterms(r, c->progeny[k], 0);
+      if (c->progeny[k] != NULL) runner_do_stars_ghost(r, c->progeny[k], 0);
   } else {
 
-    if (count > 0) {
+    /* Init the list of active particles that have to be updated. */
+    int *sid = NULL;
+    float *h_0 = NULL;
+    float *left = NULL;
+    float *right = NULL;
+    if ((sid = (int *)malloc(sizeof(int) * c->stars.count)) == NULL)
+      error("Can't allocate memory for sid.");
+    if ((h_0 = (float *)malloc(sizeof(float) * c->stars.count)) == NULL)
+      error("Can't allocate memory for h_0.");
+    if ((left = (float *)malloc(sizeof(float) * c->stars.count)) == NULL)
+      error("Can't allocate memory for left.");
+    if ((right = (float *)malloc(sizeof(float) * c->stars.count)) == NULL)
+      error("Can't allocate memory for right.");
+    for (int k = 0; k < c->stars.count; k++)
+      if (spart_is_active(&sparts[k], e)) {
+        sid[scount] = k;
+        h_0[scount] = sparts[k].h;
+        left[scount] = 0.f;
+        right[scount] = stars_h_max;
+        ++scount;
+      }
+
+    /* While there are particles that need to be updated... */
+    for (int num_reruns = 0; scount > 0 && num_reruns < max_smoothing_iter;
+         num_reruns++) {
+
+      /* Reset the redo-count. */
+      redo = 0;
+
+      /* Loop over the remaining active parts in this cell. */
+      for (int i = 0; i < scount; i++) {
+
+        /* Get a direct pointer on the part. */
+        struct spart *sp = &sparts[sid[i]];
+
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Is this part within the timestep? */
+        if (!spart_is_active(sp, e))
+          error("Ghost applied to inactive particle");
+#endif
+
+        /* Get some useful values */
+        const float h_init = h_0[i];
+        const float h_old = sp->h;
+        const float h_old_dim = pow_dimension(h_old);
+        const float h_old_dim_minus_one = pow_dimension_minus_one(h_old);
+
+        float h_new;
+        int has_no_neighbours = 0;
+
+        if (sp->density.wcount == 0.f) { /* No neighbours case */
+
+          /* Flag that there were no neighbours */
+          has_no_neighbours = 1;
+
+          /* Double h and try again */
+          h_new = 2.f * h_old;
+
+        } else {
+
+          /* Finish the density calculation */
+          stars_end_density(sp, cosmo);
+
+          /* Compute one step of the Newton-Raphson scheme */
+          const float n_sum = sp->density.wcount * h_old_dim;
+          const float n_target = stars_eta_dim;
+          const float f = n_sum - n_target;
+          const float f_prime =
+              sp->density.wcount_dh * h_old_dim +
+              hydro_dimension * sp->density.wcount * h_old_dim_minus_one;
+
+          /* Improve the bisection bounds */
+          if (n_sum < n_target)
+            left[i] = max(left[i], h_old);
+          else if (n_sum > n_target)
+            right[i] = min(right[i], h_old);
+
+#ifdef SWIFT_DEBUG_CHECKS
+          /* Check the validity of the left and right bounds */
+          if (left[i] > right[i])
+            error("Invalid left (%e) and right (%e)", left[i], right[i]);
+#endif
+
+          /* Skip if h is already h_max and we don't have enough neighbours */
+          /* Same if we are below h_min */
+          if (((sp->h >= stars_h_max) && (f < 0.f)) ||
+              ((sp->h <= stars_h_min) && (f > 0.f))) {
+
+            stars_reset_feedback(sp);
+
+            /* Ok, we are done with this particle */
+            continue;
+          }
+
+          /* Normal case: Use Newton-Raphson to get a better value of h */
+
+          /* Avoid floating point exception from f_prime = 0 */
+          h_new = h_old - f / (f_prime + FLT_MIN);
+
+          /* Be verbose about the particles that struggle to converge */
+          if (num_reruns > max_smoothing_iter - 10) {
+
+            message(
+                "Smoothing length convergence problem: iter=%d p->id=%lld "
+                "h_init=%12.8e h_old=%12.8e h_new=%12.8e f=%f f_prime=%f "
+                "n_sum=%12.8e n_target=%12.8e left=%12.8e right=%12.8e",
+                num_reruns, sp->id, h_init, h_old, h_new, f, f_prime, n_sum,
+                n_target, left[i], right[i]);
+          }
+
+#ifdef SWIFT_DEBUG_CHECKS
+          if ((f > 0.f && h_new > h_old) || (f < 0.f && h_new < h_old))
+            error(
+                "Smoothing length correction not going in the right direction");
+#endif
+
+          /* Safety check: truncate to the range [ h_old/2 , 2h_old ]. */
+          h_new = min(h_new, 2.f * h_old);
+          h_new = max(h_new, 0.5f * h_old);
+
+          /* Verify that we are actually progressing towards the answer */
+          h_new = max(h_new, left[i]);
+          h_new = min(h_new, right[i]);
+        }
+
+        /* Check whether the particle has an inappropriate smoothing length */
+        if (fabsf(h_new - h_old) > eps * h_old) {
+
+          /* Ok, correct then */
+
+          /* Case where we have been oscillating around the solution */
+          if ((h_new == left[i] && h_old == right[i]) ||
+              (h_old == left[i] && h_new == right[i])) {
+
+            /* Bisect the remaining interval */
+            sp->h = pow_inv_dimension(
+                0.5f * (pow_dimension(left[i]) + pow_dimension(right[i])));
+
+          } else {
+
+            /* Normal case */
+            sp->h = h_new;
+          }
+
+          /* If below the absolute maximum, try again */
+          if (sp->h < stars_h_max && sp->h > stars_h_min) {
+
+            /* Flag for another round of fun */
+            sid[redo] = sid[i];
+            h_0[redo] = h_0[i];
+            left[redo] = left[i];
+            right[redo] = right[i];
+            redo += 1;
+
+            /* Re-initialise everything */
+            stars_init_spart(sp);
+
+            /* Off we go ! */
+            continue;
+
+          } else if (sp->h <= stars_h_min) {
+
+            /* Ok, this particle is a lost cause... */
+            sp->h = stars_h_min;
+
+          } else if (sp->h >= stars_h_max) {
 
-      /* do sourceterms in this cell? */
-      const int incell =
-          sourceterms_test_cell(cell_min, cell_width, sourceterms, dimen);
-      if (incell == 1) {
-        sourceterms_apply(r, sourceterms, c);
+            /* Ok, this particle is a lost cause... */
+            sp->h = stars_h_max;
+
+            /* Do some damage control if no neighbours at all were found */
+            if (has_no_neighbours) {
+              stars_spart_has_no_neighbours(sp, cosmo);
+            }
+
+          } else {
+            error(
+                "Fundamental problem with the smoothing length iteration "
+                "logic.");
+          }
+        }
+
+        /* We now have a particle whose smoothing length has converged */
+        stars_reset_feedback(sp);
+
+        /* Compute the stellar evolution  */
+        stars_evolve_spart(sp, e->stars_properties, cosmo);
+      }
+
+      /* We now need to treat the particles whose smoothing length had not
+       * converged again */
+
+      /* Re-set the counter for the next loop (potentially). */
+      scount = redo;
+      if (scount > 0) {
+
+        /* Climb up the cell hierarchy. */
+        for (struct cell *finger = c; finger != NULL; finger = finger->parent) {
+
+          /* Run through this cell's density interactions. */
+          for (struct link *l = finger->stars.density; l != NULL; l = l->next) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+            if (l->t->ti_run < r->e->ti_current)
+              error("Density task should have been run.");
+#endif
+
+            /* Self-interaction? */
+            if (l->t->type == task_type_self)
+              runner_doself_subset_branch_stars_density(r, finger, sparts, sid,
+                                                        scount);
+
+            /* Otherwise, pair interaction? */
+            else if (l->t->type == task_type_pair) {
+
+              /* Left or right? */
+              if (l->t->ci == finger)
+                runner_dopair_subset_branch_stars_density(
+                    r, finger, sparts, sid, scount, l->t->cj);
+              else
+                runner_dopair_subset_branch_stars_density(
+                    r, finger, sparts, sid, scount, l->t->ci);
+            }
+
+            /* Otherwise, sub-self interaction? */
+            else if (l->t->type == task_type_sub_self)
+              runner_dosub_subset_stars_density(r, finger, sparts, sid, scount,
+                                                NULL, -1, 1);
+
+            /* Otherwise, sub-pair interaction? */
+            else if (l->t->type == task_type_sub_pair) {
+
+              /* Left or right? */
+              if (l->t->ci == finger)
+                runner_dosub_subset_stars_density(r, finger, sparts, sid,
+                                                  scount, l->t->cj, -1, 1);
+              else
+                runner_dosub_subset_stars_density(r, finger, sparts, sid,
+                                                  scount, l->t->ci, -1, 1);
+            }
+          }
+        }
       }
     }
+
+    if (scount) {
+      error("Smoothing length failed to converge on %i particles.", scount);
+    }
+
+    /* Be clean */
+    free(left);
+    free(right);
+    free(sid);
+    free(h_0);
   }
 
-  if (timer) TIMER_TOC(timer_dosource);
+  if (timer) TIMER_TOC(timer_dostars_ghost);
 }
 
 /**
@@ -142,8 +421,8 @@ void runner_do_sourceterms(struct runner *r, struct cell *c, int timer) {
  */
 void runner_do_grav_external(struct runner *r, struct cell *c, int timer) {
 
-  struct gpart *restrict gparts = c->gparts;
-  const int gcount = c->gcount;
+  struct gpart *restrict gparts = c->grav.parts;
+  const int gcount = c->grav.count;
   const struct engine *e = r->e;
   const struct external_potential *potential = e->external_potential;
   const struct phys_const *constants = e->physical_constants;
@@ -185,8 +464,8 @@ void runner_do_grav_external(struct runner *r, struct cell *c, int timer) {
  */
 void runner_do_grav_mesh(struct runner *r, struct cell *c, int timer) {
 
-  struct gpart *restrict gparts = c->gparts;
-  const int gcount = c->gcount;
+  struct gpart *restrict gparts = c->grav.parts;
+  const int gcount = c->grav.count;
   const struct engine *e = r->e;
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -227,11 +506,13 @@ void runner_do_cooling(struct runner *r, struct cell *c, int timer) {
   const struct cooling_function_data *cooling_func = e->cooling_func;
   const struct phys_const *constants = e->physical_constants;
   const struct unit_system *us = e->internal_units;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct entropy_floor_properties *entropy_floor_props = e->entropy_floor;
   const double time_base = e->time_base;
   const integertime_t ti_current = e->ti_current;
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  const int count = c->hydro.count;
 
   TIMER_TIC;
 
@@ -253,19 +534,26 @@ void runner_do_cooling(struct runner *r, struct cell *c, int timer) {
 
       if (part_is_active(p, e)) {
 
-        double dt_cool;
+        double dt_cool, dt_therm;
         if (with_cosmology) {
           const integertime_t ti_step = get_integer_timestep(p->time_bin);
           const integertime_t ti_begin =
-              get_integer_time_begin(ti_current + 1, p->time_bin);
+              get_integer_time_begin(ti_current - 1, p->time_bin);
+
           dt_cool =
               cosmology_get_delta_time(cosmo, ti_begin, ti_begin + ti_step);
+          dt_therm = cosmology_get_therm_kick_factor(e->cosmology, ti_begin,
+                                                     ti_begin + ti_step);
+
         } else {
           dt_cool = get_timestep(p->time_bin, time_base);
+          dt_therm = get_timestep(p->time_bin, time_base);
         }
 
         /* Let's cool ! */
-        cooling_cool_part(constants, us, cosmo, cooling_func, p, xp, dt_cool);
+        cooling_cool_part(constants, us, cosmo, hydro_props,
+                          entropy_floor_props, cooling_func, p, xp, dt_cool,
+                          dt_therm);
       }
     }
   }
@@ -273,6 +561,104 @@ void runner_do_cooling(struct runner *r, struct cell *c, int timer) {
   if (timer) TIMER_TOC(timer_do_cooling);
 }
 
+/**
+ * @brief Convert eligible, actively star-forming gas particles of a cell
+ * into star particles.
+ *
+ * Recurses into split cells. On leaf cells, each active gas particle is
+ * tested with star_formation_is_star_forming(); if so, its star-formation
+ * rate is computed over its current time-step and, when selected by
+ * star_formation_should_convert_to_star(), the particle is turned into a
+ * #spart via cell_convert_part_to_spart().
+ *
+ * @param r The #runner thread.
+ * @param c The #cell to act upon.
+ * @param timer Are we timing this?
+ */
+void runner_do_star_formation(struct runner *r, struct cell *c, int timer) {
+
+  struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const struct star_formation *sf_props = e->star_formation;
+  const struct phys_const *phys_const = e->physical_constants;
+  const int count = c->hydro.count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const int with_feedback = (e->policy & engine_policy_feedback);
+  const struct hydro_props *restrict hydro_props = e->hydro_properties;
+  const struct unit_system *restrict us = e->internal_units;
+  struct cooling_function_data *restrict cooling = e->cooling_func;
+  const double time_base = e->time_base;
+  const integertime_t ti_current = e->ti_current;
+  /* Star count before any conversion; compared against c->stars.count at
+   * the end to detect whether any stars were actually formed. */
+  const int current_stars_count = c->stars.count;
+
+  TIMER_TIC;
+
+  /* Anything to do here? */
+  if (!cell_is_active_hydro(c, e)) return;
+
+  /* Recurse? */
+  if (c->split) {
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL) runner_do_star_formation(r, c->progeny[k], 0);
+  } else {
+
+    /* Loop over the gas particles in this cell. */
+    for (int k = 0; k < count; k++) {
+
+      /* Get a handle on the part. */
+      struct part *restrict p = &parts[k];
+      struct xpart *restrict xp = &xparts[k];
+
+      /* Only work on active particles */
+      if (part_is_active(p, e)) {
+
+        /* Is this particle star forming? */
+        if (star_formation_is_star_forming(p, xp, sf_props, phys_const, cosmo,
+                                           hydro_props, us, cooling)) {
+
+          /* Time-step size for this particle */
+          double dt_star;
+          if (with_cosmology) {
+            const integertime_t ti_step = get_integer_timestep(p->time_bin);
+            const integertime_t ti_begin =
+                get_integer_time_begin(ti_current - 1, p->time_bin);
+
+            dt_star =
+                cosmology_get_delta_time(cosmo, ti_begin, ti_begin + ti_step);
+
+          } else {
+            dt_star = get_timestep(p->time_bin, time_base);
+          }
+
+          /* Compute the SF rate of the particle */
+          star_formation_compute_SFR(p, xp, sf_props, phys_const, cosmo,
+                                     dt_star);
+
+          /* Are we forming a star particle from this SF rate? */
+          if (star_formation_should_convert_to_star(p, xp, sf_props, e,
+                                                    dt_star)) {
+
+            /* Convert the gas particle to a star particle */
+            struct spart *sp = cell_convert_part_to_spart(e, c, p, xp);
+
+            /* Copy the properties of the gas particle to the star particle */
+            star_formation_copy_properties(p, xp, sp, e, sf_props, cosmo,
+                                           with_cosmology);
+          }
+
+        } else { /* Are we not star-forming? */
+
+          /* Update the particle to flag it as not star-forming */
+          star_formation_update_part_not_SFR(p, xp, e, sf_props,
+                                             with_cosmology);
+
+        } /* Not Star-forming? */
+      }   /* is active? */
+    }     /* Loop over particles */
+  }
+
+  /* If we formed any stars, the star sorts are now invalid. We need to
+   * re-compute them. */
+  if (with_feedback && (c == c->hydro.super) &&
+      (current_stars_count != c->stars.count)) {
+    cell_clear_stars_sort_flags(c, /*is_super=*/1);
+    /* 0x1FFF = re-sort along all 13 cardinal directions. */
+    runner_do_stars_sort(r, c, 0x1FFF, /*cleanup=*/0, /*timer=*/0);
+  }
+
+  if (timer) TIMER_TOC(timer_do_star_formation);
+}
+
 /**
  * @brief Sort the entries in ascending order using QuickSort.
  *
@@ -351,26 +737,30 @@ void runner_do_sort_ascending(struct entry *sort, int N) {
   }
 }
 
+#ifdef SWIFT_DEBUG_CHECKS
 /**
  * @brief Recursively checks that the flags are consistent in a cell hierarchy.
  *
- * Debugging function.
- *
- * @param c The #cell to check.
- * @param flags The sorting flags to check.
+ * Debugging function. Exists in two flavours: hydro & stars.
+ *
+ * @param c The #cell to check.
+ * @param flags The sorting flags expected to be present on c.
  */
-void runner_check_sorts(struct cell *c, int flags) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-  if (flags & ~c->sorted) error("Inconsistent sort flags (downward)!");
-  if (c->split)
-    for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL && c->progeny[k]->count > 0)
-        runner_check_sorts(c->progeny[k], c->sorted);
+#define RUNNER_CHECK_SORTS(TYPE)                                               \
+  void runner_check_sorts_##TYPE(struct cell *c, int flags) {                  \
+                                                                               \
+    if (flags & ~c->TYPE.sorted) error("Inconsistent sort flags (downward)!"); \
+    if (c->split)                                                              \
+      for (int k = 0; k < 8; k++)                                              \
+        if (c->progeny[k] != NULL && c->progeny[k]->TYPE.count > 0)            \
+          runner_check_sorts_##TYPE(c->progeny[k], c->TYPE.sorted);            \
+  }
 #else
+/* Without debug checks the generated functions abort if ever called. */
+#define RUNNER_CHECK_SORTS(TYPE)                                       \
+  void runner_check_sorts_##TYPE(struct cell *c, int flags) {          \
+    error("Calling debugging code without debugging flag activated."); \
+  }
 #endif
-}
+
+/* Instantiate runner_check_sorts_hydro() and runner_check_sorts_stars(). */
+RUNNER_CHECK_SORTS(hydro)
+RUNNER_CHECK_SORTS(stars)
 
 /**
  * @brief Sort the particles in the given cell along all cardinal directions.
@@ -383,25 +773,25 @@ void runner_check_sorts(struct cell *c, int flags) {
  * @param clock Flag indicating whether to record the timing or not, needed
  *      for recursive calls.
  */
-void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
-                    int clock) {
+void runner_do_hydro_sort(struct runner *r, struct cell *c, int flags,
+                          int cleanup, int clock) {
 
   struct entry *fingers[8];
-  const int count = c->count;
-  const struct part *parts = c->parts;
-  struct xpart *xparts = c->xparts;
+  const int count = c->hydro.count;
+  const struct part *parts = c->hydro.parts;
+  struct xpart *xparts = c->hydro.xparts;
   float buff[8];
 
   TIMER_TIC;
 
   /* We need to do the local sorts plus whatever was requested further up. */
-  flags |= c->do_sort;
+  flags |= c->hydro.do_sort;
   if (cleanup) {
-    c->sorted = 0;
+    c->hydro.sorted = 0;
   } else {
-    flags &= ~c->sorted;
+    flags &= ~c->hydro.sorted;
   }
-  if (flags == 0 && !c->do_sub_sort) return;
+  if (flags == 0 && !c->hydro.do_sub_sort) return;
 
   /* Check that the particles have been moved to the current time */
   if (flags && !cell_are_part_drifted(c, r->e))
@@ -409,25 +799,26 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Make sure the sort flags are consistent (downward). */
-  runner_check_sorts(c, c->sorted);
+  runner_check_sorts_hydro(c, c->hydro.sorted);
 
   /* Make sure the sort flags are consistent (upward). */
   for (struct cell *finger = c->parent; finger != NULL;
        finger = finger->parent) {
-    if (finger->sorted & ~c->sorted) error("Inconsistent sort flags (upward).");
+    if (finger->hydro.sorted & ~c->hydro.sorted)
+      error("Inconsistent sort flags (upward).");
   }
 
   /* Update the sort timer which represents the last time the sorts
      were re-set. */
-  if (c->sorted == 0) c->ti_sort = r->e->ti_current;
+  if (c->hydro.sorted == 0) c->hydro.ti_sort = r->e->ti_current;
 #endif
 
   /* start by allocating the entry arrays in the requested dimensions. */
   size_t allocated = 0;
   for (int j = 0; j < 13; j++) {
-    if ((flags & (1 << j)) && c->sort[j] == NULL) {
-      if ((c->sort[j] = (struct entry *)malloc(sizeof(struct entry) *
-                                               (count + 1))) == NULL)
+    if ((flags & (1 << j)) && c->hydro.sort[j] == NULL) {
+      if ((c->hydro.sort[j] = (struct entry *)malloc(sizeof(struct entry) *
+                                                     (count + 1))) == NULL)
         error("Failed to allocate sort memory.");
       allocated += sizeof(struct entry) * (count + 1);
     }
@@ -441,18 +832,19 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
     float dx_max_sort = 0.0f;
     float dx_max_sort_old = 0.0f;
     for (int k = 0; k < 8; k++) {
-      if (c->progeny[k] != NULL && c->progeny[k]->count > 0) {
+      if (c->progeny[k] != NULL && c->progeny[k]->hydro.count > 0) {
         /* Only propagate cleanup if the progeny is stale. */
-        runner_do_sort(r, c->progeny[k], flags,
-                       cleanup && (c->progeny[k]->dx_max_sort >
-                                   space_maxreldx * c->progeny[k]->dmin),
-                       0);
-        dx_max_sort = max(dx_max_sort, c->progeny[k]->dx_max_sort);
-        dx_max_sort_old = max(dx_max_sort_old, c->progeny[k]->dx_max_sort_old);
+        runner_do_hydro_sort(r, c->progeny[k], flags,
+                             cleanup && (c->progeny[k]->hydro.dx_max_sort_old >
+                                         space_maxreldx * c->progeny[k]->dmin),
+                             0);
+        dx_max_sort = max(dx_max_sort, c->progeny[k]->hydro.dx_max_sort);
+        dx_max_sort_old =
+            max(dx_max_sort_old, c->progeny[k]->hydro.dx_max_sort_old);
       }
     }
-    c->dx_max_sort = dx_max_sort;
-    c->dx_max_sort_old = dx_max_sort_old;
+    c->hydro.dx_max_sort = dx_max_sort;
+    c->hydro.dx_max_sort_old = dx_max_sort_old;
 
     /* Loop over the 13 different sort arrays. */
     for (int j = 0; j < 13; j++) {
@@ -465,7 +857,7 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
       off[0] = 0;
       for (int k = 1; k < 8; k++)
         if (c->progeny[k - 1] != NULL)
-          off[k] = off[k - 1] + c->progeny[k - 1]->count;
+          off[k] = off[k - 1] + c->progeny[k - 1]->hydro.count;
         else
           off[k] = off[k - 1];
 
@@ -473,8 +865,8 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
       int inds[8];
       for (int k = 0; k < 8; k++) {
         inds[k] = k;
-        if (c->progeny[k] != NULL && c->progeny[k]->count > 0) {
-          fingers[k] = c->progeny[k]->sort[j];
+        if (c->progeny[k] != NULL && c->progeny[k]->hydro.count > 0) {
+          fingers[k] = c->progeny[k]->hydro.sort[j];
           buff[k] = fingers[k]->d;
           off[k] = off[k];
         } else
@@ -491,7 +883,7 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
           }
 
       /* For each entry in the new sort list. */
-      struct entry *finger = c->sort[j];
+      struct entry *finger = c->hydro.sort[j];
       for (int ind = 0; ind < count; ind++) {
 
         /* Copy the minimum into the new sort array. */
@@ -512,11 +904,11 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
       } /* Merge. */
 
       /* Add a sentinel. */
-      c->sort[j][count].d = FLT_MAX;
-      c->sort[j][count].i = 0;
+      c->hydro.sort[j][count].d = FLT_MAX;
+      c->hydro.sort[j][count].i = 0;
 
       /* Mark as sorted. */
-      atomic_or(&c->sorted, 1 << j);
+      atomic_or(&c->hydro.sorted, 1 << j);
 
     } /* loop over sort arrays. */
 
@@ -526,7 +918,7 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
   else {
 
     /* Reset the sort distance */
-    if (c->sorted == 0) {
+    if (c->hydro.sorted == 0) {
 #ifdef SWIFT_DEBUG_CHECKS
       if (xparts != NULL && c->nodeID != engine_rank)
         error("Have non-NULL xparts in foreign cell");
@@ -540,8 +932,8 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
           xparts[k].x_diff_sort[2] = 0.0f;
         }
       }
-      c->dx_max_sort_old = 0.f;
-      c->dx_max_sort = 0.f;
+      c->hydro.dx_max_sort_old = 0.f;
+      c->hydro.dx_max_sort = 0.f;
     }
 
     /* Fill the sort array. */
@@ -549,20 +941,20 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
       const double px[3] = {parts[k].x[0], parts[k].x[1], parts[k].x[2]};
       for (int j = 0; j < 13; j++)
         if (flags & (1 << j)) {
-          c->sort[j][k].i = k;
-          c->sort[j][k].d = px[0] * runner_shift[j][0] +
-                            px[1] * runner_shift[j][1] +
-                            px[2] * runner_shift[j][2];
+          c->hydro.sort[j][k].i = k;
+          c->hydro.sort[j][k].d = px[0] * runner_shift[j][0] +
+                                  px[1] * runner_shift[j][1] +
+                                  px[2] * runner_shift[j][2];
         }
     }
 
     /* Add the sentinel and sort. */
     for (int j = 0; j < 13; j++)
       if (flags & (1 << j)) {
-        c->sort[j][count].d = FLT_MAX;
-        c->sort[j][count].i = 0;
-        runner_do_sort_ascending(c->sort[j], count);
-        atomic_or(&c->sorted, 1 << j);
+        c->hydro.sort[j][count].d = FLT_MAX;
+        c->hydro.sort[j][count].i = 0;
+        runner_do_sort_ascending(c->hydro.sort[j], count);
+        atomic_or(&c->hydro.sorted, 1 << j);
       }
   }
 
@@ -570,7 +962,7 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
   /* Verify the sorting. */
   for (int j = 0; j < 13; j++) {
     if (!(flags & (1 << j))) continue;
-    struct entry *finger = c->sort[j];
+    struct entry *finger = c->hydro.sort[j];
     for (int k = 1; k < count; k++) {
       if (finger[k].d < finger[k - 1].d)
         error("Sorting failed, ascending array.");
@@ -579,23 +971,243 @@ void runner_do_sort(struct runner *r, struct cell *c, int flags, int cleanup,
   }
 
   /* Make sure the sort flags are consistent (downward). */
-  runner_check_sorts(c, flags);
+  runner_check_sorts_hydro(c, flags);
 
   /* Make sure the sort flags are consistent (upward). */
   for (struct cell *finger = c->parent; finger != NULL;
        finger = finger->parent) {
-    if (finger->sorted & ~c->sorted) error("Inconsistent sort flags.");
+    if (finger->hydro.sorted & ~c->hydro.sorted)
+      error("Inconsistent sort flags.");
   }
 #endif
 
   /* Clear the cell's sort flags. */
-  c->do_sort = 0;
-  c->do_sub_sort = 0;
-  c->requires_sorts = 0;
+  c->hydro.do_sort = 0;
+  c->hydro.do_sub_sort = 0;
+  c->hydro.requires_sorts = 0;
 
   if (clock) TIMER_TOC(timer_dosort);
 }
 
+/**
+ * @brief Sort the stars particles in the given cell along all cardinal
+ * directions.
+ *
+ * @param r The #runner.
+ * @param c The #cell.
+ * @param flags The sort directions requested (bit mask over the 13 axes).
+ * @param cleanup If true, re-build the sorts for the selected flags instead
+ *        of just adding them.
+ * @param clock Flag indicating whether to record the timing or not, needed
+ *      for recursive calls.
+ */
+void runner_do_stars_sort(struct runner *r, struct cell *c, int flags,
+                          int cleanup, int clock) {
+
+  struct entry *fingers[8];
+  const int count = c->stars.count;
+  struct spart *sparts = c->stars.parts;
+  float buff[8];
+
+  TIMER_TIC;
+
+  /* We need to do the local sorts plus whatever was requested further up. */
+  flags |= c->stars.do_sort;
+  if (cleanup) {
+    c->stars.sorted = 0;
+  } else {
+    flags &= ~c->stars.sorted;
+  }
+  if (flags == 0 && !c->stars.do_sub_sort) return;
+
+  /* Check that the particles have been moved to the current time */
+  if (flags && !cell_are_spart_drifted(c, r->e)) {
+    error("Sorting un-drifted cell c->nodeID=%d", c->nodeID);
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Make sure the sort flags are consistent (downward). */
+  runner_check_sorts_stars(c, c->stars.sorted);
+
+  /* Make sure the sort flags are consistent (upward). */
+  for (struct cell *finger = c->parent; finger != NULL;
+       finger = finger->parent) {
+    if (finger->stars.sorted & ~c->stars.sorted)
+      error("Inconsistent sort flags (upward).");
+  }
+
+  /* Update the sort timer which represents the last time the sorts
+     were re-set. */
+  if (c->stars.sorted == 0) c->stars.ti_sort = r->e->ti_current;
+#endif
+
+  /* start by allocating the entry arrays in the requested dimensions. */
+  for (int j = 0; j < 13; j++) {
+    if ((flags & (1 << j)) && c->stars.sort[j] == NULL) {
+      if ((c->stars.sort[j] = (struct entry *)malloc(sizeof(struct entry) *
+                                                     (count + 1))) == NULL)
+        error("Failed to allocate sort memory.");
+    }
+  }
+
+  /* Does this cell have any progeny? */
+  if (c->split) {
+
+    /* Fill in the gaps within the progeny. */
+    float dx_max_sort = 0.0f;
+    float dx_max_sort_old = 0.0f;
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL && c->progeny[k]->stars.count > 0) {
+        /* Only propagate cleanup if the progeny is stale. */
+        const int cleanup_prog =
+            cleanup && (c->progeny[k]->stars.dx_max_sort_old >
+                        space_maxreldx * c->progeny[k]->dmin);
+        runner_do_stars_sort(r, c->progeny[k], flags, cleanup_prog, 0);
+        dx_max_sort = max(dx_max_sort, c->progeny[k]->stars.dx_max_sort);
+        dx_max_sort_old =
+            max(dx_max_sort_old, c->progeny[k]->stars.dx_max_sort_old);
+      }
+    }
+    c->stars.dx_max_sort = dx_max_sort;
+    c->stars.dx_max_sort_old = dx_max_sort_old;
+
+    /* Loop over the 13 different sort arrays. */
+    for (int j = 0; j < 13; j++) {
+
+      /* Has this sort array been flagged? */
+      if (!(flags & (1 << j))) continue;
+
+      /* Init the particle index offsets. */
+      int off[8];
+      off[0] = 0;
+      for (int k = 1; k < 8; k++)
+        if (c->progeny[k - 1] != NULL)
+          off[k] = off[k - 1] + c->progeny[k - 1]->stars.count;
+        else
+          off[k] = off[k - 1];
+
+      /* Init the entries and indices. */
+      int inds[8];
+      for (int k = 0; k < 8; k++) {
+        inds[k] = k;
+        if (c->progeny[k] != NULL && c->progeny[k]->stars.count > 0) {
+          fingers[k] = c->progeny[k]->stars.sort[j];
+          buff[k] = fingers[k]->d;
+          /* NOTE(review): self-assignment below is a no-op, kept to mirror
+           * the hydro version of this sort. */
+          off[k] = off[k];
+        } else
+          buff[k] = FLT_MAX;
+      }
+
+      /* Sort the buffer. */
+      for (int i = 0; i < 7; i++)
+        for (int k = i + 1; k < 8; k++)
+          if (buff[inds[k]] < buff[inds[i]]) {
+            int temp_i = inds[i];
+            inds[i] = inds[k];
+            inds[k] = temp_i;
+          }
+
+      /* For each entry in the new sort list: 8-way merge of the already
+       * sorted progeny arrays. */
+      struct entry *finger = c->stars.sort[j];
+      for (int ind = 0; ind < count; ind++) {
+
+        /* Copy the minimum into the new sort array. */
+        finger[ind].d = buff[inds[0]];
+        finger[ind].i = fingers[inds[0]]->i + off[inds[0]];
+
+        /* Update the buffer. */
+        fingers[inds[0]] += 1;
+        buff[inds[0]] = fingers[inds[0]]->d;
+
+        /* Find the smallest entry. */
+        for (int k = 1; k < 8 && buff[inds[k]] < buff[inds[k - 1]]; k++) {
+          int temp_i = inds[k - 1];
+          inds[k - 1] = inds[k];
+          inds[k] = temp_i;
+        }
+
+      } /* Merge. */
+
+      /* Add a sentinel. */
+      c->stars.sort[j][count].d = FLT_MAX;
+      c->stars.sort[j][count].i = 0;
+
+      /* Mark as sorted. */
+      atomic_or(&c->stars.sorted, 1 << j);
+
+    } /* loop over sort arrays. */
+
+  } /* progeny? */
+
+  /* Otherwise, just sort. */
+  else {
+
+    /* Reset the sort distance */
+    if (c->stars.sorted == 0) {
+
+      /* And the individual sort distances if we are a local cell */
+      for (int k = 0; k < count; k++) {
+        sparts[k].x_diff_sort[0] = 0.0f;
+        sparts[k].x_diff_sort[1] = 0.0f;
+        sparts[k].x_diff_sort[2] = 0.0f;
+      }
+      c->stars.dx_max_sort_old = 0.f;
+      c->stars.dx_max_sort = 0.f;
+    }
+
+    /* Fill the sort array. */
+    for (int k = 0; k < count; k++) {
+      const double px[3] = {sparts[k].x[0], sparts[k].x[1], sparts[k].x[2]};
+      for (int j = 0; j < 13; j++)
+        if (flags & (1 << j)) {
+          c->stars.sort[j][k].i = k;
+          c->stars.sort[j][k].d = px[0] * runner_shift[j][0] +
+                                  px[1] * runner_shift[j][1] +
+                                  px[2] * runner_shift[j][2];
+        }
+    }
+
+    /* Add the sentinel and sort. */
+    for (int j = 0; j < 13; j++)
+      if (flags & (1 << j)) {
+        c->stars.sort[j][count].d = FLT_MAX;
+        c->stars.sort[j][count].i = 0;
+        runner_do_sort_ascending(c->stars.sort[j], count);
+        atomic_or(&c->stars.sorted, 1 << j);
+      }
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Verify the sorting. */
+  for (int j = 0; j < 13; j++) {
+    if (!(flags & (1 << j))) continue;
+    struct entry *finger = c->stars.sort[j];
+    for (int k = 1; k < count; k++) {
+      if (finger[k].d < finger[k - 1].d)
+        error("Sorting failed, ascending array.");
+      if (finger[k].i >= count) error("Sorting failed, indices borked.");
+    }
+  }
+
+  /* Make sure the sort flags are consistent (downward). */
+  runner_check_sorts_stars(c, flags);
+
+  /* Make sure the sort flags are consistent (upward). */
+  for (struct cell *finger = c->parent; finger != NULL;
+       finger = finger->parent) {
+    if (finger->stars.sorted & ~c->stars.sorted)
+      error("Inconsistent sort flags.");
+  }
+#endif
+
+  /* Clear the cell's sort flags. */
+  c->stars.do_sort = 0;
+  c->stars.do_sub_sort = 0;
+  c->stars.requires_sorts = 0;
+
+  if (clock) TIMER_TOC(timer_do_stars_sort);
+}
+
 /**
  * @brief Initialize the multipoles before the gravity calculation.
  *
@@ -618,7 +1230,7 @@ void runner_do_init_grav(struct runner *r, struct cell *c, int timer) {
   if (!cell_is_active_gravity(c, e)) return;
 
   /* Reset the gravity acceleration tensors */
-  gravity_field_tensors_init(&c->multipole->pot, e->ti_current);
+  gravity_field_tensors_init(&c->grav.multipole->pot, e->ti_current);
 
   /* Recurse? */
   if (c->split) {
@@ -642,11 +1254,15 @@ void runner_do_extra_ghost(struct runner *r, struct cell *c, int timer) {
 
 #ifdef EXTRA_HYDRO_LOOP
 
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  const int count = c->hydro.count;
   const struct engine *e = r->e;
+  const integertime_t ti_end = e->ti_current;
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const double time_base = e->time_base;
   const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
 
   TIMER_TIC;
 
@@ -673,8 +1289,19 @@ void runner_do_extra_ghost(struct runner *r, struct cell *c, int timer) {
 
         /* As of here, particle force variables will be set. */
 
+        /* Calculate the time-step for passing to hydro_prepare_force.
+         * This is the physical time between the start and end of the time-step
+         * without any scale-factor powers. */
+        double dt_alpha;
+        if (with_cosmology) {
+          const integertime_t ti_step = get_integer_timestep(p->time_bin);
+          dt_alpha = cosmology_get_delta_time(cosmo, ti_end - ti_step, ti_end);
+        } else {
+          dt_alpha = get_timestep(p->time_bin, time_base);
+        }
+
         /* Compute variables required for the force loop */
-        hydro_prepare_force(p, xp, cosmo);
+        hydro_prepare_force(p, xp, cosmo, hydro_props, dt_alpha);
 
         /* The particle force values are now set.  Do _NOT_
            try to read any particle density variables! */
@@ -702,14 +1329,16 @@ void runner_do_extra_ghost(struct runner *r, struct cell *c, int timer) {
  */
 void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
 
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
   const struct engine *e = r->e;
   const struct space *s = e->s;
   const struct hydro_space *hs = &s->hs;
   const struct cosmology *cosmo = e->cosmology;
   const struct chemistry_global_data *chemistry = e->chemistry;
+  const struct star_formation *star_formation = e->star_formation;
   const float hydro_h_max = e->hydro_properties->h_max;
+  const float hydro_h_min = e->hydro_properties->h_min;
   const float eps = e->hydro_properties->h_tolerance;
   const float hydro_eta_dim =
       pow_dimension(e->hydro_properties->eta_neighbours);
@@ -727,13 +1356,26 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
       if (c->progeny[k] != NULL) runner_do_ghost(r, c->progeny[k], 0);
   } else {
 
-    /* Init the list of active particles that have to be updated. */
+    /* Init the list of active particles that have to be updated and their
+     * current smoothing lengths. */
     int *pid = NULL;
-    if ((pid = (int *)malloc(sizeof(int) * c->count)) == NULL)
+    float *h_0 = NULL;
+    float *left = NULL;
+    float *right = NULL;
+    if ((pid = (int *)malloc(sizeof(int) * c->hydro.count)) == NULL)
       error("Can't allocate memory for pid.");
-    for (int k = 0; k < c->count; k++)
+    if ((h_0 = (float *)malloc(sizeof(float) * c->hydro.count)) == NULL)
+      error("Can't allocate memory for h_0.");
+    if ((left = (float *)malloc(sizeof(float) * c->hydro.count)) == NULL)
+      error("Can't allocate memory for left.");
+    if ((right = (float *)malloc(sizeof(float) * c->hydro.count)) == NULL)
+      error("Can't allocate memory for right.");
+    for (int k = 0; k < c->hydro.count; k++)
       if (part_is_active(&parts[k], e)) {
         pid[count] = k;
+        h_0[count] = parts[k].h;
+        left[count] = 0.f;
+        right[count] = hydro_h_max;
         ++count;
       }
 
@@ -757,9 +1399,11 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
 #endif
 
         /* Get some useful values */
+        const float h_init = h_0[i];
         const float h_old = p->h;
         const float h_old_dim = pow_dimension(h_old);
         const float h_old_dim_minus_one = pow_dimension_minus_one(h_old);
+
         float h_new;
         int has_no_neighbours = 0;
 
@@ -776,6 +1420,7 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
           /* Finish the density calculation */
           hydro_end_density(p, cosmo);
           chemistry_end_density(p, chemistry, cosmo);
+          star_formation_end_density(p, star_formation, cosmo);
 
           /* Compute one step of the Newton-Raphson scheme */
           const float n_sum = p->density.wcount * h_old_dim;
@@ -785,12 +1430,27 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
               p->density.wcount_dh * h_old_dim +
               hydro_dimension * p->density.wcount * h_old_dim_minus_one;
 
+          /* Improve the bisection bounds */
+          if (n_sum < n_target)
+            left[i] = max(left[i], h_old);
+          else if (n_sum > n_target)
+            right[i] = min(right[i], h_old);
+
+#ifdef SWIFT_DEBUG_CHECKS
+          /* Check the validity of the left and right bounds */
+          if (left[i] > right[i])
+            error("Invalid left (%e) and right (%e)", left[i], right[i]);
+#endif
+
           /* Skip if h is already h_max and we don't have enough neighbours */
-          if ((p->h >= hydro_h_max) && (f < 0.f)) {
+          /* Same if we are below h_min */
+          if (((p->h >= hydro_h_max) && (f < 0.f)) ||
+              ((p->h <= hydro_h_min) && (f > 0.f))) {
 
-          /* We have a particle whose smoothing length is already set (wants to
-           * be larger but has already hit the maximum). So, just tidy up as if
-           * the smoothing length had converged correctly  */
+          /* We have a particle whose smoothing length is already set (wants
+           * to be larger but has already hit the maximum OR wants to be smaller
+           * but has already reached the minimum). So, just tidy up as if the
+           * smoothing length had converged correctly  */
 
 #ifdef EXTRA_HYDRO_LOOP
 
@@ -807,10 +1467,28 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
             hydro_reset_gradient(p);
 
 #else
+            const struct hydro_props *hydro_props = e->hydro_properties;
+
+            /* Calculate the time-step for passing to hydro_prepare_force, used
+             * for the evolution of alpha factors (i.e. those involved in the
+             * artificial viscosity and thermal conduction terms) */
+            const int with_cosmology = (e->policy & engine_policy_cosmology);
+            const double time_base = e->time_base;
+            const integertime_t ti_end = e->ti_current;
+            double dt_alpha;
+
+            if (with_cosmology) {
+              const integertime_t ti_step = get_integer_timestep(p->time_bin);
+              dt_alpha =
+                  cosmology_get_delta_time(cosmo, ti_end - ti_step, ti_end);
+            } else {
+              dt_alpha = get_timestep(p->time_bin, time_base);
+            }
+
             /* As of here, particle force variables will be set. */
 
             /* Compute variables required for the force loop */
-            hydro_prepare_force(p, xp, cosmo);
+            hydro_prepare_force(p, xp, cosmo, hydro_props, dt_alpha);
 
             /* The particle force values are now set.  Do _NOT_
                try to read any particle density variables! */
@@ -820,6 +1498,7 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
 
 #endif /* EXTRA_HYDRO_LOOP */
 
+            /* Ok, we are done with this particle */
             continue;
           }
 
@@ -828,6 +1507,17 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
           /* Avoid floating point exception from f_prime = 0 */
           h_new = h_old - f / (f_prime + FLT_MIN);
 
+          /* Be verbose about the particles that struggle to converge */
+          if (num_reruns > max_smoothing_iter - 10) {
+
+            message(
+                "Smoothing length convergence problem: iter=%d p->id=%lld "
+                "h_init=%12.8e h_old=%12.8e h_new=%12.8e f=%f f_prime=%f "
+                "n_sum=%12.8e n_target=%12.8e left=%12.8e right=%12.8e",
+                num_reruns, p->id, h_init, h_old, h_new, f, f_prime, n_sum,
+                n_target, left[i], right[i]);
+          }
+
 #ifdef SWIFT_DEBUG_CHECKS
           if ((f > 0.f && h_new > h_old) || (f < 0.f && h_new < h_old))
             error(
@@ -837,29 +1527,55 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
           /* Safety check: truncate to the range [ h_old/2 , 2h_old ]. */
           h_new = min(h_new, 2.f * h_old);
           h_new = max(h_new, 0.5f * h_old);
+
+          /* Verify that we are actually progressing towards the answer */
+          h_new = max(h_new, left[i]);
+          h_new = min(h_new, right[i]);
         }
 
         /* Check whether the particle has an inappropriate smoothing length */
         if (fabsf(h_new - h_old) > eps * h_old) {
 
           /* Ok, correct then */
-          p->h = h_new;
 
-          /* If below the absolute maximum, try again */
-          if (p->h < hydro_h_max) {
+          /* Case where we have been oscillating around the solution */
+          if ((h_new == left[i] && h_old == right[i]) ||
+              (h_old == left[i] && h_new == right[i])) {
+
+            /* Bisect the remaining interval */
+            p->h = pow_inv_dimension(
+                0.5f * (pow_dimension(left[i]) + pow_dimension(right[i])));
+
+          } else {
+
+            /* Normal case */
+            p->h = h_new;
+          }
+
+          /* If within the allowed range, try again */
+          if (p->h < hydro_h_max && p->h > hydro_h_min) {
 
             /* Flag for another round of fun */
             pid[redo] = pid[i];
+            h_0[redo] = h_0[i];
+            left[redo] = left[i];
+            right[redo] = right[i];
             redo += 1;
 
             /* Re-initialise everything */
             hydro_init_part(p, hs);
             chemistry_init_part(p, chemistry);
+            star_formation_init_part(p, star_formation);
 
             /* Off we go ! */
             continue;
 
-          } else {
+          } else if (p->h <= hydro_h_min) {
+
+            /* Ok, this particle is a lost cause... */
+            p->h = hydro_h_min;
+
+          } else if (p->h >= hydro_h_max) {
 
             /* Ok, this particle is a lost cause... */
             p->h = hydro_h_max;
@@ -868,7 +1584,14 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
             if (has_no_neighbours) {
               hydro_part_has_no_neighbours(p, xp, cosmo);
               chemistry_part_has_no_neighbours(p, xp, chemistry, cosmo);
+              star_formation_part_has_no_neighbours(p, xp, star_formation,
+                                                    cosmo);
             }
+
+          } else {
+            error(
+                "Fundamental problem with the smoothing length iteration "
+                "logic.");
           }
         }
 
@@ -889,10 +1612,27 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
         hydro_reset_gradient(p);
 
 #else
+        const struct hydro_props *hydro_props = e->hydro_properties;
+
+        /* Calculate the time-step for passing to hydro_prepare_force, used for
+         * the evolution of alpha factors (i.e. those involved in the artificial
+         * viscosity and thermal conduction terms) */
+        const int with_cosmology = (e->policy & engine_policy_cosmology);
+        const integertime_t ti_end = e->ti_current;
+        const double time_base = e->time_base;
+        double dt_alpha;
+
+        if (with_cosmology) {
+          const integertime_t ti_step = get_integer_timestep(p->time_bin);
+          dt_alpha = cosmology_get_delta_time(cosmo, ti_end - ti_step, ti_end);
+        } else {
+          dt_alpha = get_timestep(p->time_bin, time_base);
+        }
+
         /* As of here, particle force variables will be set. */
 
         /* Compute variables required for the force loop */
-        hydro_prepare_force(p, xp, cosmo);
+        hydro_prepare_force(p, xp, cosmo, hydro_props, dt_alpha);
 
         /* The particle force values are now set.  Do _NOT_
            try to read any particle density variables! */
@@ -914,7 +1654,7 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
         for (struct cell *finger = c; finger != NULL; finger = finger->parent) {
 
           /* Run through this cell's density interactions. */
-          for (struct link *l = finger->density; l != NULL; l = l->next) {
+          for (struct link *l = finger->hydro.density; l != NULL; l = l->next) {
 
 #ifdef SWIFT_DEBUG_CHECKS
             if (l->t->ti_run < r->e->ti_current)
@@ -963,38 +1703,70 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) {
     }
 
     /* Be clean */
+    free(left);
+    free(right);
     free(pid);
+    free(h_0);
   }
 
   if (timer) TIMER_TOC(timer_do_ghost);
 }
 
 /**
- * @brief Unskip any hydro tasks associated with active cells.
+ * @brief Unskip any hydro tasks associated with active cells.
+ *
+ * @param c The cell.
+ * @param e The engine.
+ */
+static void runner_do_unskip_hydro(struct cell *c, struct engine *e) {
+
+  /* Ignore empty cells. */
+  if (c->hydro.count == 0) return;
+
+  /* Skip inactive cells. */
+  if (!cell_is_active_hydro(c, e)) return;
+
+  /* Recurse */
+  if (c->split) {
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL) {
+        struct cell *cp = c->progeny[k];
+        runner_do_unskip_hydro(cp, e);
+      }
+    }
+  }
+
+  /* Unskip any active tasks. */
+  const int forcerebuild = cell_unskip_hydro_tasks(c, &e->sched);
+  if (forcerebuild) atomic_inc(&e->forcerebuild);
+}
+
+/**
+ * @brief Unskip any stars tasks associated with active cells.
  *
  * @param c The cell.
  * @param e The engine.
  */
-static void runner_do_unskip_hydro(struct cell *c, struct engine *e) {
+static void runner_do_unskip_stars(struct cell *c, struct engine *e) {
 
   /* Ignore empty cells. */
-  if (c->count == 0) return;
+  if (c->stars.count == 0) return;
 
   /* Skip inactive cells. */
-  if (!cell_is_active_hydro(c, e)) return;
+  if (!cell_is_active_stars(c, e)) return;
 
   /* Recurse */
   if (c->split) {
     for (int k = 0; k < 8; k++) {
       if (c->progeny[k] != NULL) {
         struct cell *cp = c->progeny[k];
-        runner_do_unskip_hydro(cp, e);
+        runner_do_unskip_stars(cp, e);
       }
     }
   }
 
   /* Unskip any active tasks. */
-  const int forcerebuild = cell_unskip_hydro_tasks(c, &e->sched);
+  const int forcerebuild = cell_unskip_stars_tasks(c, &e->sched);
   if (forcerebuild) atomic_inc(&e->forcerebuild);
 }
 
@@ -1007,13 +1779,13 @@ static void runner_do_unskip_hydro(struct cell *c, struct engine *e) {
 static void runner_do_unskip_gravity(struct cell *c, struct engine *e) {
 
   /* Ignore empty cells. */
-  if (c->gcount == 0) return;
+  if (c->grav.count == 0) return;
 
   /* Skip inactive cells. */
   if (!cell_is_active_gravity(c, e)) return;
 
   /* Recurse */
-  if (c->split && c->depth < space_subdepth_grav) {
+  if (c->split && ((c->maxdepth - c->depth) >= space_subdepth_diff_grav)) {
     for (int k = 0; k < 8; k++) {
       if (c->progeny[k] != NULL) {
         struct cell *cp = c->progeny[k];
@@ -1051,9 +1823,13 @@ void runner_do_unskip_mapper(void *map_data, int num_elements,
       if ((e->policy & engine_policy_self_gravity) ||
           (e->policy & engine_policy_external_gravity))
         runner_do_unskip_gravity(c, e);
+
+      /* Stars tasks */
+      if (e->policy & engine_policy_stars) runner_do_unskip_stars(c, e);
     }
   }
 }
+
 /**
  * @brief Drift all part in a cell.
  *
@@ -1086,6 +1862,21 @@ void runner_do_drift_gpart(struct runner *r, struct cell *c, int timer) {
   if (timer) TIMER_TOC(timer_drift_gpart);
 }
 
+/**
+ * @brief Drift all spart in a cell.
+ *
+ * @param r The runner thread.
+ * @param c The cell.
+ * @param timer Are we timing this ?
+ */
+void runner_do_drift_spart(struct runner *r, struct cell *c, int timer) {
+
+  TIMER_TIC;
+
+  cell_drift_spart(c, r->e, 0);
+
+  if (timer) TIMER_TOC(timer_drift_spart);
+}
 /**
  * @brief Perform the first half-kick on all the active particles in a cell.
  *
@@ -1098,21 +1889,24 @@ void runner_do_kick1(struct runner *r, struct cell *c, int timer) {
   const struct engine *e = r->e;
   const struct cosmology *cosmo = e->cosmology;
   const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct entropy_floor_properties *entropy_floor = e->entropy_floor;
   const int with_cosmology = (e->policy & engine_policy_cosmology);
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
-  struct gpart *restrict gparts = c->gparts;
-  struct spart *restrict sparts = c->sparts;
-  const int count = c->count;
-  const int gcount = c->gcount;
-  const int scount = c->scount;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  struct gpart *restrict gparts = c->grav.parts;
+  struct spart *restrict sparts = c->stars.parts;
+  const int count = c->hydro.count;
+  const int gcount = c->grav.count;
+  const int scount = c->stars.count;
   const integertime_t ti_current = e->ti_current;
   const double time_base = e->time_base;
 
   TIMER_TIC;
 
   /* Anything to do here? */
-  if (!cell_is_starting_hydro(c, e) && !cell_is_starting_gravity(c, e)) return;
+  if (!cell_is_starting_hydro(c, e) && !cell_is_starting_gravity(c, e) &&
+      !cell_is_starting_stars(c, e))
+    return;
 
   /* Recurse? */
   if (c->split) {
@@ -1130,19 +1924,26 @@ void runner_do_kick1(struct runner *r, struct cell *c, int timer) {
       /* If particle needs to be kicked */
       if (part_is_starting(p, e)) {
 
+#ifdef SWIFT_DEBUG_CHECKS
+        if (p->wakeup == time_bin_awake)
+          error("Woken-up particle that has not been processed in kick1");
+#endif
+
+        /* Skip particles that have been woken up and treated by the limiter. */
+        if (p->wakeup != time_bin_not_awake) continue;
+
         const integertime_t ti_step = get_integer_timestep(p->time_bin);
         const integertime_t ti_begin =
             get_integer_time_begin(ti_current + 1, p->time_bin);
 
 #ifdef SWIFT_DEBUG_CHECKS
-        const integertime_t ti_end =
-            get_integer_time_end(ti_current + 1, p->time_bin);
+        const integertime_t ti_end = ti_begin + ti_step;
 
         if (ti_begin != ti_current)
           error(
               "Particle in wrong time-bin, ti_end=%lld, ti_begin=%lld, "
-              "ti_step=%lld time_bin=%d ti_current=%lld",
-              ti_end, ti_begin, ti_step, p->time_bin, ti_current);
+              "ti_step=%lld time_bin=%d wakeup=%d ti_current=%lld",
+              ti_end, ti_begin, ti_step, p->time_bin, p->wakeup, ti_current);
 #endif
 
         /* Time interval for this half-kick */
@@ -1165,7 +1966,7 @@ void runner_do_kick1(struct runner *r, struct cell *c, int timer) {
 
         /* do the kick */
         kick_part(p, xp, dt_kick_hydro, dt_kick_grav, dt_kick_therm,
-                  dt_kick_corr, cosmo, hydro_props, ti_begin,
+                  dt_kick_corr, cosmo, hydro_props, entropy_floor, ti_begin,
                   ti_begin + ti_step / 2);
 
         /* Update the accelerations to be used in the drift for hydro */
@@ -1216,7 +2017,7 @@ void runner_do_kick1(struct runner *r, struct cell *c, int timer) {
       }
     }
 
-    /* Loop over the star particles in this cell. */
+    /* Loop over the stars particles in this cell. */
     for (int k = 0; k < scount; k++) {
 
       /* Get a handle on the s-part. */
@@ -1272,21 +2073,24 @@ void runner_do_kick2(struct runner *r, struct cell *c, int timer) {
   const struct engine *e = r->e;
   const struct cosmology *cosmo = e->cosmology;
   const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct entropy_floor_properties *entropy_floor = e->entropy_floor;
   const int with_cosmology = (e->policy & engine_policy_cosmology);
-  const int count = c->count;
-  const int gcount = c->gcount;
-  const int scount = c->scount;
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
-  struct gpart *restrict gparts = c->gparts;
-  struct spart *restrict sparts = c->sparts;
+  const int count = c->hydro.count;
+  const int gcount = c->grav.count;
+  const int scount = c->stars.count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  struct gpart *restrict gparts = c->grav.parts;
+  struct spart *restrict sparts = c->stars.parts;
   const integertime_t ti_current = e->ti_current;
   const double time_base = e->time_base;
 
   TIMER_TIC;
 
   /* Anything to do here? */
-  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e)) return;
+  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e) &&
+      !cell_is_active_stars(c, e))
+    return;
 
   /* Recurse? */
   if (c->split) {
@@ -1304,39 +2108,60 @@ void runner_do_kick2(struct runner *r, struct cell *c, int timer) {
       /* If particle needs to be kicked */
       if (part_is_active(p, e)) {
 
-        const integertime_t ti_step = get_integer_timestep(p->time_bin);
-        const integertime_t ti_begin =
-            get_integer_time_begin(ti_current, p->time_bin);
+        integertime_t ti_begin, ti_end, ti_step;
+
+#ifdef SWIFT_DEBUG_CHECKS
+        if (p->wakeup == time_bin_awake)
+          error("Woken-up particle that has not been processed in kick1");
+#endif
+
+        if (p->wakeup == time_bin_not_awake) {
+
+          /* Time-step from a regular kick */
+          ti_step = get_integer_timestep(p->time_bin);
+          ti_begin = get_integer_time_begin(ti_current, p->time_bin);
+          ti_end = ti_begin + ti_step;
+
+        } else {
+
+          /* Time-step that follows a wake-up call */
+          ti_begin = get_integer_time_begin(ti_current, p->wakeup);
+          ti_end = get_integer_time_end(ti_current, p->time_bin);
+          ti_step = ti_end - ti_begin;
+
+          /* Reset the flag. Everything is back to normal from now on. */
+          p->wakeup = time_bin_awake;
+        }
 
 #ifdef SWIFT_DEBUG_CHECKS
         if (ti_begin + ti_step != ti_current)
           error(
               "Particle in wrong time-bin, ti_begin=%lld, ti_step=%lld "
-              "time_bin=%d ti_current=%lld",
-              ti_begin, ti_step, p->time_bin, ti_current);
+              "time_bin=%d wakeup=%d ti_current=%lld",
+              ti_begin, ti_step, p->time_bin, p->wakeup, ti_current);
 #endif
         /* Time interval for this half-kick */
         double dt_kick_grav, dt_kick_hydro, dt_kick_therm, dt_kick_corr;
         if (with_cosmology) {
           dt_kick_hydro = cosmology_get_hydro_kick_factor(
-              cosmo, ti_begin + ti_step / 2, ti_begin + ti_step);
+              cosmo, ti_begin + ti_step / 2, ti_end);
           dt_kick_grav = cosmology_get_grav_kick_factor(
-              cosmo, ti_begin + ti_step / 2, ti_begin + ti_step);
+              cosmo, ti_begin + ti_step / 2, ti_end);
           dt_kick_therm = cosmology_get_therm_kick_factor(
-              cosmo, ti_begin + ti_step / 2, ti_begin + ti_step);
+              cosmo, ti_begin + ti_step / 2, ti_end);
           dt_kick_corr = cosmology_get_corr_kick_factor(
-              cosmo, ti_begin + ti_step / 2, ti_begin + ti_step);
+              cosmo, ti_begin + ti_step / 2, ti_end);
         } else {
-          dt_kick_hydro = (ti_step / 2) * time_base;
-          dt_kick_grav = (ti_step / 2) * time_base;
-          dt_kick_therm = (ti_step / 2) * time_base;
-          dt_kick_corr = (ti_step / 2) * time_base;
+          dt_kick_hydro = (ti_end - (ti_begin + ti_step / 2)) * time_base;
+          dt_kick_grav = (ti_end - (ti_begin + ti_step / 2)) * time_base;
+          dt_kick_therm = (ti_end - (ti_begin + ti_step / 2)) * time_base;
+          dt_kick_corr = (ti_end - (ti_begin + ti_step / 2)) * time_base;
         }
 
         /* Finish the time-step with a second half-kick */
         kick_part(p, xp, dt_kick_hydro, dt_kick_grav, dt_kick_therm,
-                  dt_kick_corr, cosmo, hydro_props, ti_begin + ti_step / 2,
-                  ti_begin + ti_step);
+                  dt_kick_corr, cosmo, hydro_props, entropy_floor,
+                  ti_begin + ti_step / 2, ti_end);
 
 #ifdef SWIFT_DEBUG_CHECKS
         /* Check that kick and the drift are synchronized */
@@ -1428,7 +2253,7 @@ void runner_do_kick2(struct runner *r, struct cell *c, int timer) {
 #endif
 
         /* Prepare the values to be drifted */
-        star_reset_predicted_values(sp);
+        stars_reset_predicted_values(sp);
       }
     }
   }
@@ -1447,29 +2272,34 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
 
   const struct engine *e = r->e;
   const integertime_t ti_current = e->ti_current;
-  const int count = c->count;
-  const int gcount = c->gcount;
-  const int scount = c->scount;
-  struct part *restrict parts = c->parts;
-  struct xpart *restrict xparts = c->xparts;
-  struct gpart *restrict gparts = c->gparts;
-  struct spart *restrict sparts = c->sparts;
+  const int with_cosmology = (e->policy & engine_policy_cosmology);
+  const int count = c->hydro.count;
+  const int gcount = c->grav.count;
+  const int scount = c->stars.count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  struct gpart *restrict gparts = c->grav.parts;
+  struct spart *restrict sparts = c->stars.parts;
 
   TIMER_TIC;
 
   /* Anything to do here? */
-  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e)) {
-    c->updated = 0;
-    c->g_updated = 0;
-    c->s_updated = 0;
+  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e) &&
+      !cell_is_active_stars(c, e)) {
+    c->hydro.updated = 0;
+    c->grav.updated = 0;
+    c->stars.updated = 0;
     return;
   }
 
   int updated = 0, g_updated = 0, s_updated = 0;
+  int inhibited = 0, g_inhibited = 0, s_inhibited = 0;
   integertime_t ti_hydro_end_min = max_nr_timesteps, ti_hydro_end_max = 0,
                 ti_hydro_beg_max = 0;
   integertime_t ti_gravity_end_min = max_nr_timesteps, ti_gravity_end_max = 0,
                 ti_gravity_beg_max = 0;
+  integertime_t ti_stars_end_min = max_nr_timesteps, ti_stars_end_max = 0,
+                ti_stars_beg_max = 0;
 
   /* No children? */
   if (!c->split) {
@@ -1500,6 +2330,11 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
         p->time_bin = get_time_bin(ti_new_step);
         if (p->gpart != NULL) p->gpart->time_bin = p->time_bin;
 
+        /* Update the tracers properties */
+        tracers_after_timestep(p, xp, e->internal_units, e->physical_constants,
+                               with_cosmology, e->cosmology,
+                               e->hydro_properties, e->cooling_func, e->time);
+
         /* Number of updated particles */
         updated++;
         if (p->gpart != NULL) g_updated++;
@@ -1526,6 +2361,9 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
 
       else { /* part is inactive */
 
+        /* Count the number of inhibited particles */
+        if (part_is_inhibited(p, e)) inhibited++;
+
         const integertime_t ti_end =
             get_integer_time_end(ti_current, p->time_bin);
 
@@ -1592,6 +2430,9 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
 
         } else { /* gpart is inactive */
 
+          /* Count the number of inhibited particles */
+          if (gpart_is_inhibited(gp, e)) g_inhibited++;
+
           const integertime_t ti_end =
               get_integer_time_end(ti_current, gp->time_bin);
 
@@ -1636,33 +2477,41 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
         s_updated++;
         g_updated++;
 
-        /* What is the next sync-point ? */
+        ti_stars_end_min = min(ti_current + ti_new_step, ti_stars_end_min);
+        ti_stars_end_max = max(ti_current + ti_new_step, ti_stars_end_max);
         ti_gravity_end_min = min(ti_current + ti_new_step, ti_gravity_end_min);
         ti_gravity_end_max = max(ti_current + ti_new_step, ti_gravity_end_max);
 
         /* What is the next starting point for this cell ? */
+        ti_stars_beg_max = max(ti_current, ti_stars_beg_max);
         ti_gravity_beg_max = max(ti_current, ti_gravity_beg_max);
 
-      } else { /* star particle is inactive */
+        /* star particle is inactive but not inhibited */
+      } else {
+
+        /* Count the number of inhibited particles */
+        if (spart_is_inhibited(sp, e)) ++s_inhibited;
 
         const integertime_t ti_end =
             get_integer_time_end(ti_current, sp->time_bin);
 
-        /* What is the next sync-point ? */
-        ti_gravity_end_min = min(ti_end, ti_gravity_end_min);
-        ti_gravity_end_max = max(ti_end, ti_gravity_end_max);
-
         const integertime_t ti_beg =
             get_integer_time_begin(ti_current + 1, sp->time_bin);
 
+        ti_stars_end_min = min(ti_end, ti_stars_end_min);
+        ti_stars_end_max = max(ti_end, ti_stars_end_max);
+        ti_gravity_end_min = min(ti_end, ti_gravity_end_min);
+        ti_gravity_end_max = max(ti_end, ti_gravity_end_max);
+
         /* What is the next starting point for this cell ? */
+        ti_stars_beg_max = max(ti_beg, ti_stars_beg_max);
         ti_gravity_beg_max = max(ti_beg, ti_gravity_beg_max);
       }
     }
   } else {
 
     /* Loop over the progeny. */
-    for (int k = 0; k < 8; k++)
+    for (int k = 0; k < 8; k++) {
       if (c->progeny[k] != NULL) {
         struct cell *restrict cp = c->progeny[k];
 
@@ -1670,73 +2519,222 @@ void runner_do_timestep(struct runner *r, struct cell *c, int timer) {
         runner_do_timestep(r, cp, 0);
 
         /* And aggregate */
-        updated += cp->updated;
-        g_updated += cp->g_updated;
-        s_updated += cp->s_updated;
-        ti_hydro_end_min = min(cp->ti_hydro_end_min, ti_hydro_end_min);
-        ti_hydro_end_max = max(cp->ti_hydro_end_max, ti_hydro_end_max);
-        ti_hydro_beg_max = max(cp->ti_hydro_beg_max, ti_hydro_beg_max);
-        ti_gravity_end_min = min(cp->ti_gravity_end_min, ti_gravity_end_min);
-        ti_gravity_end_max = max(cp->ti_gravity_end_max, ti_gravity_end_max);
-        ti_gravity_beg_max = max(cp->ti_gravity_beg_max, ti_gravity_beg_max);
+        updated += cp->hydro.updated;
+        g_updated += cp->grav.updated;
+        s_updated += cp->stars.updated;
+        inhibited += cp->hydro.inhibited;
+        g_inhibited += cp->grav.inhibited;
+        s_inhibited += cp->stars.inhibited;
+        ti_hydro_end_min = min(cp->hydro.ti_end_min, ti_hydro_end_min);
+        ti_hydro_end_max = max(cp->hydro.ti_end_max, ti_hydro_end_max);
+        ti_hydro_beg_max = max(cp->hydro.ti_beg_max, ti_hydro_beg_max);
+        ti_gravity_end_min = min(cp->grav.ti_end_min, ti_gravity_end_min);
+        ti_gravity_end_max = max(cp->grav.ti_end_max, ti_gravity_end_max);
+        ti_gravity_beg_max = max(cp->grav.ti_beg_max, ti_gravity_beg_max);
+        ti_stars_end_min = min(cp->stars.ti_end_min, ti_stars_end_min);
+        ti_stars_end_max = max(cp->stars.ti_end_max, ti_stars_end_max);
+        ti_stars_beg_max = max(cp->stars.ti_beg_max, ti_stars_beg_max);
       }
+    }
   }
 
   /* Store the values. */
-  c->updated = updated;
-  c->g_updated = g_updated;
-  c->s_updated = s_updated;
-  c->ti_hydro_end_min = ti_hydro_end_min;
-  c->ti_hydro_end_max = ti_hydro_end_max;
-  c->ti_hydro_beg_max = ti_hydro_beg_max;
-  c->ti_gravity_end_min = ti_gravity_end_min;
-  c->ti_gravity_end_max = ti_gravity_end_max;
-  c->ti_gravity_beg_max = ti_gravity_beg_max;
+  c->hydro.updated = updated;
+  c->grav.updated = g_updated;
+  c->stars.updated = s_updated;
+  c->hydro.inhibited = inhibited;
+  c->grav.inhibited = g_inhibited;
+  c->stars.inhibited = s_inhibited;
+  c->hydro.ti_end_min = ti_hydro_end_min;
+  c->hydro.ti_end_max = ti_hydro_end_max;
+  c->hydro.ti_beg_max = ti_hydro_beg_max;
+  c->grav.ti_end_min = ti_gravity_end_min;
+  c->grav.ti_end_max = ti_gravity_end_max;
+  c->grav.ti_beg_max = ti_gravity_beg_max;
+  c->stars.ti_end_min = ti_stars_end_min;
+  c->stars.ti_end_max = ti_stars_end_max;
+  c->stars.ti_beg_max = ti_stars_beg_max;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->hydro.ti_end_min == e->ti_current &&
+      c->hydro.ti_end_min < max_nr_timesteps)
+    error("End of next hydro step is current time!");
+  if (c->grav.ti_end_min == e->ti_current &&
+      c->grav.ti_end_min < max_nr_timesteps)
+    error("End of next gravity step is current time!");
+  if (c->stars.ti_end_min == e->ti_current &&
+      c->stars.ti_end_min < max_nr_timesteps)
+    error("End of next stars step is current time!");
+#endif
 
   if (timer) TIMER_TOC(timer_timestep);
 }
 
 /**
- * @brief End the force calculation of all active particles in a cell
- * by multiplying the acccelerations by the relevant constants
+ * @brief Apply the time-step limiter to all awaken particles in a cell
+ * hierarchy.
  *
- * @param r The #runner thread.
+ * @param r The task #runner.
  * @param c The #cell.
+ * @param force Limit the particles irrespective of the #cell flags.
  * @param timer Are we timing this ?
  */
-void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
+void runner_do_limiter(struct runner *r, struct cell *c, int force, int timer) {
 
   const struct engine *e = r->e;
-  const struct space *s = e->s;
-  const struct cosmology *cosmo = e->cosmology;
-  const int count = c->count;
-  const int gcount = c->gcount;
-  const int scount = c->scount;
-  struct part *restrict parts = c->parts;
-  struct gpart *restrict gparts = c->gparts;
-  struct spart *restrict sparts = c->sparts;
-  const int periodic = s->periodic;
-  const float G_newton = e->physical_constants->const_newton_G;
+  const integertime_t ti_current = e->ti_current;
+  const int count = c->hydro.count;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
 
   TIMER_TIC;
 
-  /* Potential normalisation in the case of periodic gravity */
-  float potential_normalisation = 0.;
-  if (periodic && (e->policy & engine_policy_self_gravity)) {
-    const double volume = s->dim[0] * s->dim[1] * s->dim[2];
-    const double r_s = e->mesh->r_s;
-    potential_normalisation = 4. * M_PI * e->total_mass * r_s * r_s / volume;
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check that we only limit local cells. */
+  if (c->nodeID != engine_rank) error("Limiting dt of a foreign cell is nope.");
+#endif
+
+  integertime_t ti_hydro_end_min = max_nr_timesteps, ti_hydro_end_max = 0,
+                ti_hydro_beg_max = 0;
+  integertime_t ti_gravity_end_min = max_nr_timesteps, ti_gravity_end_max = 0,
+                ti_gravity_beg_max = 0;
+
+  /* Limit irrespective of cell flags? */
+  force |= c->hydro.do_limiter;
+
+  /* Early abort? */
+  if (c->hydro.count == 0) {
+
+    /* Clear the limiter flags. */
+    c->hydro.do_limiter = 0;
+    c->hydro.do_sub_limiter = 0;
+    return;
+  }
+
+  /* Loop over the progeny ? */
+  if (c->split && (force || c->hydro.do_sub_limiter)) {
+    for (int k = 0; k < 8; k++) {
+      if (c->progeny[k] != NULL) {
+        struct cell *restrict cp = c->progeny[k];
+
+        /* Recurse */
+        runner_do_limiter(r, cp, force, 0);
+
+        /* And aggregate */
+        ti_hydro_end_min = min(cp->hydro.ti_end_min, ti_hydro_end_min);
+        ti_hydro_end_max = max(cp->hydro.ti_end_max, ti_hydro_end_max);
+        ti_hydro_beg_max = max(cp->hydro.ti_beg_max, ti_hydro_beg_max);
+        ti_gravity_end_min = min(cp->grav.ti_end_min, ti_gravity_end_min);
+        ti_gravity_end_max = max(cp->grav.ti_end_max, ti_gravity_end_max);
+        ti_gravity_beg_max = max(cp->grav.ti_beg_max, ti_gravity_beg_max);
+      }
+    }
+
+    /* Store the updated values */
+    c->hydro.ti_end_min = min(c->hydro.ti_end_min, ti_hydro_end_min);
+    c->hydro.ti_end_max = max(c->hydro.ti_end_max, ti_hydro_end_max);
+    c->hydro.ti_beg_max = max(c->hydro.ti_beg_max, ti_hydro_beg_max);
+    c->grav.ti_end_min = min(c->grav.ti_end_min, ti_gravity_end_min);
+    c->grav.ti_end_max = max(c->grav.ti_end_max, ti_gravity_end_max);
+    c->grav.ti_beg_max = max(c->grav.ti_beg_max, ti_gravity_beg_max);
+
+  } else if (!c->split && force) {
+
+    ti_hydro_end_min = c->hydro.ti_end_min;
+    ti_hydro_end_max = c->hydro.ti_end_max;
+    ti_hydro_beg_max = c->hydro.ti_beg_max;
+    ti_gravity_end_min = c->grav.ti_end_min;
+    ti_gravity_end_max = c->grav.ti_end_max;
+    ti_gravity_beg_max = c->grav.ti_beg_max;
+
+    /* Loop over the gas particles in this cell. */
+    for (int k = 0; k < count; k++) {
+
+      /* Get a handle on the part. */
+      struct part *restrict p = &parts[k];
+      struct xpart *restrict xp = &xparts[k];
+
+      /* Avoid inhibited particles */
+      if (part_is_inhibited(p, e)) continue;
+
+      /* If the particle will be active no need to wake it up */
+      if (part_is_active(p, e) && p->wakeup != time_bin_not_awake)
+        p->wakeup = time_bin_not_awake;
+
+      /* Bip, bip, bip... wake-up time */
+      if (p->wakeup == time_bin_awake) {
+
+        /* Apply the limiter and get the new time-step size */
+        const integertime_t ti_new_step = timestep_limit_part(p, xp, e);
+
+        /* What is the next sync-point ? */
+        ti_hydro_end_min = min(ti_current + ti_new_step, ti_hydro_end_min);
+        ti_hydro_end_max = max(ti_current + ti_new_step, ti_hydro_end_max);
+
+        /* What is the next starting point for this cell ? */
+        ti_hydro_beg_max = max(ti_current, ti_hydro_beg_max);
+
+        /* Also limit the gpart counter-part */
+        if (p->gpart != NULL) {
+
+          /* Register the time-bin */
+          p->gpart->time_bin = p->time_bin;
+
+          /* What is the next sync-point ? */
+          ti_gravity_end_min =
+              min(ti_current + ti_new_step, ti_gravity_end_min);
+          ti_gravity_end_max =
+              max(ti_current + ti_new_step, ti_gravity_end_max);
+
+          /* What is the next starting point for this cell ? */
+          ti_gravity_beg_max = max(ti_current, ti_gravity_beg_max);
+        }
+      }
+    }
+
+    /* Store the updated values */
+    c->hydro.ti_end_min = min(c->hydro.ti_end_min, ti_hydro_end_min);
+    c->hydro.ti_end_max = max(c->hydro.ti_end_max, ti_hydro_end_max);
+    c->hydro.ti_beg_max = max(c->hydro.ti_beg_max, ti_hydro_beg_max);
+    c->grav.ti_end_min = min(c->grav.ti_end_min, ti_gravity_end_min);
+    c->grav.ti_end_max = max(c->grav.ti_end_max, ti_gravity_end_max);
+    c->grav.ti_beg_max = max(c->grav.ti_beg_max, ti_gravity_beg_max);
   }
 
+  /* Clear the limiter flags. */
+  c->hydro.do_limiter = 0;
+  c->hydro.do_sub_limiter = 0;
+
+  if (timer) TIMER_TOC(timer_do_limiter);
+}
+
+/**
+ * @brief End the hydro force calculation of all active particles in a cell
+ * by multiplying the accelerations by the relevant constants
+ *
+ * @param r The #runner thread.
+ * @param c The #cell.
+ * @param timer Are we timing this ?
+ */
+void runner_do_end_hydro_force(struct runner *r, struct cell *c, int timer) {
+
+  const struct engine *e = r->e;
+
+  TIMER_TIC;
+
   /* Anything to do here? */
-  if (!cell_is_active_hydro(c, e) && !cell_is_active_gravity(c, e)) return;
+  if (!cell_is_active_hydro(c, e)) return;
 
   /* Recurse? */
   if (c->split) {
     for (int k = 0; k < 8; k++)
-      if (c->progeny[k] != NULL) runner_do_end_force(r, c->progeny[k], 0);
+      if (c->progeny[k] != NULL) runner_do_end_hydro_force(r, c->progeny[k], 0);
   } else {
 
+    const struct cosmology *cosmo = e->cosmology;
+    const int count = c->hydro.count;
+    struct part *restrict parts = c->hydro.parts;
+
     /* Loop over the gas particles in this cell. */
     for (int k = 0; k < count; k++) {
 
@@ -1749,6 +2747,48 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
         hydro_end_force(p, cosmo);
       }
     }
+  }
+
+  if (timer) TIMER_TOC(timer_end_hydro_force);
+}
+
+/**
+ * @brief End the gravity force calculation of all active particles in a cell
+ * by multiplying the accelerations by the relevant constants
+ *
+ * @param r The #runner thread.
+ * @param c The #cell.
+ * @param timer Are we timing this ?
+ */
+void runner_do_end_grav_force(struct runner *r, struct cell *c, int timer) {
+
+  const struct engine *e = r->e;
+
+  TIMER_TIC;
+
+  /* Anything to do here? */
+  if (!cell_is_active_gravity(c, e)) return;
+
+  /* Recurse? */
+  if (c->split) {
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL) runner_do_end_grav_force(r, c->progeny[k], 0);
+  } else {
+
+    const struct space *s = e->s;
+    const int periodic = s->periodic;
+    const float G_newton = e->physical_constants->const_newton_G;
+
+    /* Potential normalisation in the case of periodic gravity */
+    float potential_normalisation = 0.;
+    if (periodic && (e->policy & engine_policy_self_gravity)) {
+      const double volume = s->dim[0] * s->dim[1] * s->dim[2];
+      const double r_s = e->mesh->r_s;
+      potential_normalisation = 4. * M_PI * e->total_mass * r_s * r_s / volume;
+    }
+
+    const int gcount = c->grav.count;
+    struct gpart *restrict gparts = c->grav.parts;
 
     /* Loop over the g-particles in this cell. */
     for (int k = 0; k < gcount; k++) {
@@ -1775,7 +2815,7 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
         long long id = 0;
         if (gp->type == swift_type_gas)
           id = e->s->parts[-gp->id_or_neg_offset].id;
-        else if (gp->type == swift_type_star)
+        else if (gp->type == swift_type_stars)
           id = e->s->sparts[-gp->id_or_neg_offset].id;
         else if (gp->type == swift_type_black_hole)
           error("Unexisting type");
@@ -1800,13 +2840,14 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
 
           /* Check that this gpart has interacted with all the other
            * particles (via direct or multipoles) in the box */
-          if (gp->num_interacted != e->total_nr_gparts) {
+          if (gp->num_interacted !=
+              e->total_nr_gparts - e->count_inhibited_gparts) {
 
             /* Get the ID of the gpart */
             long long my_id = 0;
             if (gp->type == swift_type_gas)
               my_id = e->s->parts[-gp->id_or_neg_offset].id;
-            else if (gp->type == swift_type_star)
+            else if (gp->type == swift_type_stars)
               my_id = e->s->sparts[-gp->id_or_neg_offset].id;
             else if (gp->type == swift_type_black_hole)
               error("Unexisting type");
@@ -1817,29 +2858,16 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
                 "g-particle (id=%lld, type=%s) did not interact "
                 "gravitationally with all other gparts "
                 "gp->num_interacted=%lld, total_gparts=%lld (local "
-                "num_gparts=%zd)",
+                "num_gparts=%zd inhibited_gparts=%lld)",
                 my_id, part_type_names[gp->type], gp->num_interacted,
-                e->total_nr_gparts, e->s->nr_gparts);
+                e->total_nr_gparts, e->s->nr_gparts, e->count_inhibited_gparts);
           }
         }
 #endif
       }
     }
-
-    /* Loop over the star particles in this cell. */
-    for (int k = 0; k < scount; k++) {
-
-      /* Get a handle on the spart. */
-      struct spart *restrict sp = &sparts[k];
-      if (spart_is_active(sp, e)) {
-
-        /* Finish the force loop */
-        star_end_force(sp);
-      }
-    }
   }
-
-  if (timer) TIMER_TOC(timer_endforce);
+  if (timer) TIMER_TOC(timer_end_grav_force);
 }
 
 /**
@@ -1852,11 +2880,10 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
  */
 void runner_do_recv_part(struct runner *r, struct cell *c, int clear_sorts,
                          int timer) {
-
 #ifdef WITH_MPI
 
-  const struct part *restrict parts = c->parts;
-  const size_t nr_parts = c->count;
+  const struct part *restrict parts = c->hydro.parts;
+  const size_t nr_parts = c->hydro.count;
   const integertime_t ti_current = r->e->ti_current;
 
   TIMER_TIC;
@@ -1872,7 +2899,7 @@ void runner_do_recv_part(struct runner *r, struct cell *c, int clear_sorts,
 #endif
 
   /* Clear this cell's sorted mask. */
-  if (clear_sorts) c->sorted = 0;
+  if (clear_sorts) c->hydro.sorted = 0;
 
   /* If this cell is a leaf, collect the particle data. */
   if (!c->split) {
@@ -1893,13 +2920,13 @@ void runner_do_recv_part(struct runner *r, struct cell *c, int clear_sorts,
   /* Otherwise, recurse and collect. */
   else {
     for (int k = 0; k < 8; k++) {
-      if (c->progeny[k] != NULL && c->progeny[k]->count > 0) {
+      if (c->progeny[k] != NULL && c->progeny[k]->hydro.count > 0) {
         runner_do_recv_part(r, c->progeny[k], clear_sorts, 0);
         ti_hydro_end_min =
-            min(ti_hydro_end_min, c->progeny[k]->ti_hydro_end_min);
+            min(ti_hydro_end_min, c->progeny[k]->hydro.ti_end_min);
         ti_hydro_end_max =
-            max(ti_hydro_end_max, c->progeny[k]->ti_hydro_end_max);
-        h_max = max(h_max, c->progeny[k]->h_max);
+            max(ti_hydro_end_max, c->progeny[k]->hydro.ti_end_max);
+        h_max = max(h_max, c->progeny[k]->hydro.h_max);
       }
     }
   }
@@ -1913,10 +2940,10 @@ void runner_do_recv_part(struct runner *r, struct cell *c, int clear_sorts,
 #endif
 
   /* ... and store. */
-  // c->ti_hydro_end_min = ti_hydro_end_min;
-  // c->ti_hydro_end_max = ti_hydro_end_max;
-  c->ti_old_part = ti_current;
-  c->h_max = h_max;
+  // c->hydro.ti_end_min = ti_hydro_end_min;
+  // c->hydro.ti_end_max = ti_hydro_end_max;
+  c->hydro.ti_old_part = ti_current;
+  c->hydro.h_max = h_max;
 
   if (timer) TIMER_TOC(timer_dorecv_part);
 
@@ -1936,8 +2963,8 @@ void runner_do_recv_gpart(struct runner *r, struct cell *c, int timer) {
 
 #ifdef WITH_MPI
 
-  const struct gpart *restrict gparts = c->gparts;
-  const size_t nr_gparts = c->gcount;
+  const struct gpart *restrict gparts = c->grav.parts;
+  const size_t nr_gparts = c->grav.count;
   const integertime_t ti_current = r->e->ti_current;
 
   TIMER_TIC;
@@ -1959,11 +2986,6 @@ void runner_do_recv_gpart(struct runner *r, struct cell *c, int timer) {
       if (gparts[k].time_bin == time_bin_inhibited) continue;
       time_bin_min = min(time_bin_min, gparts[k].time_bin);
       time_bin_max = max(time_bin_max, gparts[k].time_bin);
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (gparts[k].ti_drift != ti_current)
-        error("Received un-drifted g-particle !");
-#endif
     }
 
     /* Convert into a time */
@@ -1974,12 +2996,12 @@ void runner_do_recv_gpart(struct runner *r, struct cell *c, int timer) {
   /* Otherwise, recurse and collect. */
   else {
     for (int k = 0; k < 8; k++) {
-      if (c->progeny[k] != NULL && c->progeny[k]->gcount > 0) {
+      if (c->progeny[k] != NULL && c->progeny[k]->grav.count > 0) {
         runner_do_recv_gpart(r, c->progeny[k], 0);
         ti_gravity_end_min =
-            min(ti_gravity_end_min, c->progeny[k]->ti_gravity_end_min);
+            min(ti_gravity_end_min, c->progeny[k]->grav.ti_end_min);
         ti_gravity_end_max =
-            max(ti_gravity_end_max, c->progeny[k]->ti_gravity_end_max);
+            max(ti_gravity_end_max, c->progeny[k]->grav.ti_end_max);
       }
     }
   }
@@ -1993,9 +3015,9 @@ void runner_do_recv_gpart(struct runner *r, struct cell *c, int timer) {
 #endif
 
   /* ... and store. */
-  // c->ti_gravity_end_min = ti_gravity_end_min;
-  // c->ti_gravity_end_max = ti_gravity_end_max;
-  c->ti_old_gpart = ti_current;
+  // c->grav.ti_end_min = ti_gravity_end_min;
+  // c->grav.ti_end_max = ti_gravity_end_max;
+  c->grav.ti_old_part = ti_current;
 
   if (timer) TIMER_TOC(timer_dorecv_gpart);
 
@@ -2009,72 +3031,79 @@ void runner_do_recv_gpart(struct runner *r, struct cell *c, int timer) {
  *
  * @param r The runner thread.
  * @param c The cell.
+ * @param clear_sorts Should we clear the sort flag and hence trigger a sort ?
  * @param timer Are we timing this ?
  */
-void runner_do_recv_spart(struct runner *r, struct cell *c, int timer) {
+void runner_do_recv_spart(struct runner *r, struct cell *c, int clear_sorts,
+                          int timer) {
 
 #ifdef WITH_MPI
 
-  const struct spart *restrict sparts = c->sparts;
-  const size_t nr_sparts = c->scount;
+  struct spart *restrict sparts = c->stars.parts;
+  const size_t nr_sparts = c->stars.count;
   const integertime_t ti_current = r->e->ti_current;
 
   TIMER_TIC;
 
-  integertime_t ti_gravity_end_min = max_nr_timesteps;
-  integertime_t ti_gravity_end_max = 0;
+  integertime_t ti_stars_end_min = max_nr_timesteps;
+  integertime_t ti_stars_end_max = 0;
   timebin_t time_bin_min = num_time_bins;
   timebin_t time_bin_max = 0;
+  float h_max = 0.f;
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (c->nodeID == engine_rank) error("Updating a local cell!");
 #endif
 
+  /* Clear this cell's sorted mask. */
+  if (clear_sorts) c->stars.sorted = 0;
+
   /* If this cell is a leaf, collect the particle data. */
   if (!c->split) {
 
     /* Collect everything... */
     for (size_t k = 0; k < nr_sparts; k++) {
+#ifdef DEBUG_INTERACTIONS_STARS
+      sparts[k].num_ngb_force = 0;
+#endif
       if (sparts[k].time_bin == time_bin_inhibited) continue;
       time_bin_min = min(time_bin_min, sparts[k].time_bin);
       time_bin_max = max(time_bin_max, sparts[k].time_bin);
-
-#ifdef SWIFT_DEBUG_CHECKS
-      if (sparts[k].ti_drift != ti_current)
-        error("Received un-drifted s-particle !");
-#endif
+      h_max = max(h_max, sparts[k].h);
     }
 
     /* Convert into a time */
-    ti_gravity_end_min = get_integer_time_end(ti_current, time_bin_min);
-    ti_gravity_end_max = get_integer_time_end(ti_current, time_bin_max);
+    ti_stars_end_min = get_integer_time_end(ti_current, time_bin_min);
+    ti_stars_end_max = get_integer_time_end(ti_current, time_bin_max);
   }
 
   /* Otherwise, recurse and collect. */
   else {
     for (int k = 0; k < 8; k++) {
-      if (c->progeny[k] != NULL && c->progeny[k]->scount > 0) {
-        runner_do_recv_spart(r, c->progeny[k], 0);
-        ti_gravity_end_min =
-            min(ti_gravity_end_min, c->progeny[k]->ti_gravity_end_min);
-        ti_gravity_end_max =
-            max(ti_gravity_end_max, c->progeny[k]->ti_gravity_end_max);
+      if (c->progeny[k] != NULL && c->progeny[k]->stars.count > 0) {
+        runner_do_recv_spart(r, c->progeny[k], clear_sorts, 0);
+        ti_stars_end_min =
+            min(ti_stars_end_min, c->progeny[k]->stars.ti_end_min);
+        ti_stars_end_max =
+            max(ti_stars_end_max, c->progeny[k]->stars.ti_end_max);
+        h_max = max(h_max, c->progeny[k]->stars.h_max);
       }
     }
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (ti_gravity_end_min < ti_current)
+  if (ti_stars_end_min < ti_current)
     error(
         "Received a cell at an incorrect time c->ti_end_min=%lld, "
         "e->ti_current=%lld.",
-        ti_gravity_end_min, ti_current);
+        ti_stars_end_min, ti_current);
 #endif
 
   /* ... and store. */
-  c->ti_gravity_end_min = ti_gravity_end_min;
-  c->ti_gravity_end_max = ti_gravity_end_max;
-  c->ti_old_gpart = ti_current;
+  // c->grav.ti_end_min = ti_gravity_end_min;
+  // c->grav.ti_end_max = ti_gravity_end_max;
+  c->stars.ti_old_part = ti_current;
+  c->stars.h_max = h_max;
 
   if (timer) TIMER_TOC(timer_dorecv_spart);
 
@@ -2155,10 +3184,16 @@ void *runner_main(void *data) {
 #endif
           else if (t->subtype == task_subtype_force)
             runner_doself2_branch_force(r, ci);
+          else if (t->subtype == task_subtype_limiter)
+            runner_doself2_branch_limiter(r, ci);
           else if (t->subtype == task_subtype_grav)
             runner_doself_recursive_grav(r, ci, 1);
           else if (t->subtype == task_subtype_external_grav)
             runner_do_grav_external(r, ci, 1);
+          else if (t->subtype == task_subtype_stars_density)
+            runner_doself_branch_stars_density(r, ci);
+          else if (t->subtype == task_subtype_stars_feedback)
+            runner_doself_branch_stars_feedback(r, ci);
           else
             error("Unknown/invalid task subtype (%d).", t->subtype);
           break;
@@ -2172,8 +3207,14 @@ void *runner_main(void *data) {
 #endif
           else if (t->subtype == task_subtype_force)
             runner_dopair2_branch_force(r, ci, cj);
+          else if (t->subtype == task_subtype_limiter)
+            runner_dopair2_branch_limiter(r, ci, cj);
           else if (t->subtype == task_subtype_grav)
             runner_dopair_recursive_grav(r, ci, cj, 1);
+          else if (t->subtype == task_subtype_stars_density)
+            runner_dopair_branch_stars_density(r, ci, cj);
+          else if (t->subtype == task_subtype_stars_feedback)
+            runner_dopair_branch_stars_feedback(r, ci, cj);
           else
             error("Unknown/invalid task subtype (%d).", t->subtype);
           break;
@@ -2187,6 +3228,12 @@ void *runner_main(void *data) {
 #endif
           else if (t->subtype == task_subtype_force)
             runner_dosub_self2_force(r, ci, 1);
+          else if (t->subtype == task_subtype_limiter)
+            runner_dosub_self2_limiter(r, ci, 1);
+          else if (t->subtype == task_subtype_stars_density)
+            runner_dosub_self_stars_density(r, ci, 1);
+          else if (t->subtype == task_subtype_stars_feedback)
+            runner_dosub_self_stars_feedback(r, ci, 1);
           else
             error("Unknown/invalid task subtype (%d).", t->subtype);
           break;
@@ -2200,14 +3247,29 @@ void *runner_main(void *data) {
 #endif
           else if (t->subtype == task_subtype_force)
             runner_dosub_pair2_force(r, ci, cj, t->flags, 1);
+          else if (t->subtype == task_subtype_limiter)
+            runner_dosub_pair2_limiter(r, ci, cj, t->flags, 1);
+          else if (t->subtype == task_subtype_stars_density)
+            runner_dosub_pair_stars_density(r, ci, cj, t->flags, 1);
+          else if (t->subtype == task_subtype_stars_feedback)
+            runner_dosub_pair_stars_feedback(r, ci, cj, t->flags, 1);
           else
             error("Unknown/invalid task subtype (%d).", t->subtype);
           break;
 
         case task_type_sort:
           /* Cleanup only if any of the indices went stale. */
-          runner_do_sort(r, ci, t->flags,
-                         ci->dx_max_sort_old > space_maxreldx * ci->dmin, 1);
+          runner_do_hydro_sort(
+              r, ci, t->flags,
+              ci->hydro.dx_max_sort_old > space_maxreldx * ci->dmin, 1);
+          /* Reset the sort flags as our work here is done. */
+          t->flags = 0;
+          break;
+        case task_type_stars_sort:
+          /* Cleanup only if any of the indices went stale. */
+          runner_do_stars_sort(
+              r, ci, t->flags,
+              ci->stars.dx_max_sort_old > space_maxreldx * ci->dmin, 1);
           /* Reset the sort flags as our work here is done. */
           t->flags = 0;
           break;
@@ -2222,9 +3284,15 @@ void *runner_main(void *data) {
           runner_do_extra_ghost(r, ci, 1);
           break;
 #endif
+        case task_type_stars_ghost:
+          runner_do_stars_ghost(r, ci, 1);
+          break;
         case task_type_drift_part:
           runner_do_drift_part(r, ci, 1);
           break;
+        case task_type_drift_spart:
+          runner_do_drift_spart(r, ci, 1);
+          break;
         case task_type_drift_gpart:
           runner_do_drift_gpart(r, ci, 1);
           break;
@@ -2234,12 +3302,21 @@ void *runner_main(void *data) {
         case task_type_kick2:
           runner_do_kick2(r, ci, 1);
           break;
-        case task_type_end_force:
-          runner_do_end_force(r, ci, 1);
+        case task_type_end_hydro_force:
+          runner_do_end_hydro_force(r, ci, 1);
+          break;
+        case task_type_end_grav_force:
+          runner_do_end_grav_force(r, ci, 1);
+          break;
+        case task_type_logger:
+          runner_do_logger(r, ci, 1);
           break;
         case task_type_timestep:
           runner_do_timestep(r, ci, 1);
           break;
+        case task_type_timestep_limiter:
+          runner_do_limiter(r, ci, 0, 1);
+          break;
 #ifdef WITH_MPI
         case task_type_send:
           if (t->subtype == task_subtype_tend) {
@@ -2256,10 +3333,12 @@ void *runner_main(void *data) {
             runner_do_recv_part(r, ci, 0, 1);
           } else if (t->subtype == task_subtype_gradient) {
             runner_do_recv_part(r, ci, 0, 1);
+          } else if (t->subtype == task_subtype_limiter) {
+            runner_do_recv_part(r, ci, 0, 1);
           } else if (t->subtype == task_subtype_gpart) {
             runner_do_recv_gpart(r, ci, 1);
           } else if (t->subtype == task_subtype_spart) {
-            runner_do_recv_spart(r, ci, 1);
+            runner_do_recv_spart(r, ci, 1, 1);
           } else if (t->subtype == task_subtype_multipole) {
             cell_unpack_multipoles(ci, (struct gravity_tensors *)t->buff);
             free(t->buff);
@@ -2278,13 +3357,13 @@ void *runner_main(void *data) {
           runner_do_grav_long_range(r, t->ci, 1);
           break;
         case task_type_grav_mm:
-          runner_dopair_grav_mm_symmetric(r, t->ci, t->cj);
+          runner_dopair_grav_mm_progenies(r, t->flags, t->ci, t->cj);
           break;
         case task_type_cooling:
           runner_do_cooling(r, t->ci, 1);
           break;
-        case task_type_sourceterms:
-          runner_do_sourceterms(r, t->ci, 1);
+        case task_type_star_formation:
+          runner_do_star_formation(r, t->ci, 1);
           break;
         default:
           error("Unknown/invalid task type (%d).", t->type);
@@ -2312,3 +3391,74 @@ void *runner_main(void *data) {
   /* Be kind, rewind. */
   return NULL;
 }
+
+/**
+ * @brief Write the required particles through the logger.
+ *
+ * @param r The runner thread.
+ * @param c The cell.
+ * @param timer Are we timing this ?
+ */
+void runner_do_logger(struct runner *r, struct cell *c, int timer) {
+
+#ifdef WITH_LOGGER
+  TIMER_TIC;
+
+  const struct engine *e = r->e;
+  struct part *restrict parts = c->hydro.parts;
+  struct xpart *restrict xparts = c->hydro.xparts;
+  const int count = c->hydro.count;
+
+  /* Anything to do here? */
+  if (!cell_is_starting_hydro(c, e) && !cell_is_starting_gravity(c, e)) return;
+
+  /* Recurse? Avoid spending too much time in useless cells. */
+  if (c->split) {
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL) runner_do_logger(r, c->progeny[k], 0);
+  } else {
+
+    /* Loop over the parts in this cell. */
+    for (int k = 0; k < count; k++) {
+
+      /* Get a handle on the part. */
+      struct part *restrict p = &parts[k];
+      struct xpart *restrict xp = &xparts[k];
+
+      /* Does this particle need to be logged? */
+      /* This is the same function as part_is_active, except for
+       * debugging checks */
+      if (part_is_starting(p, e)) {
+
+        if (logger_should_write(&xp->logger_data, e->logger)) {
+          /* Write particle */
+          /* Currently writing everything, should adapt it through time */
+          logger_log_part(e->logger, p,
+                          logger_mask_data[logger_x].mask |
+                              logger_mask_data[logger_v].mask |
+                              logger_mask_data[logger_a].mask |
+                              logger_mask_data[logger_u].mask |
+                              logger_mask_data[logger_h].mask |
+                              logger_mask_data[logger_rho].mask |
+                              logger_mask_data[logger_consts].mask,
+                          &xp->logger_data.last_offset);
+
+          /* Set counter back to zero */
+          xp->logger_data.steps_since_last_output = 0;
+        } else
+          /* Update counter */
+          xp->logger_data.steps_since_last_output += 1;
+      }
+    }
+  }
+
+  if (c->grav.count > 0) error("gparts not implemented");
+
+  if (c->stars.count > 0) error("sparts not implemented");
+
+  if (timer) TIMER_TOC(timer_logger);
+
+#else
+  error("Logger disabled, please enable it during configuration");
+#endif
+}
diff --git a/src/runner.h b/src/runner.h
index e33a3e380e6097a67258d116d617483caca35086..b15dd001d928121ed5a60f0e9a601adb706b2191 100644
--- a/src/runner.h
+++ b/src/runner.h
@@ -69,17 +69,21 @@ struct runner {
 /* Function prototypes. */
 void runner_do_ghost(struct runner *r, struct cell *c, int timer);
 void runner_do_extra_ghost(struct runner *r, struct cell *c, int timer);
-void runner_do_sort(struct runner *r, struct cell *c, int flag, int cleanup,
-                    int clock);
+void runner_do_hydro_sort(struct runner *r, struct cell *c, int flag,
+                          int cleanup, int clock);
+void runner_do_stars_sort(struct runner *r, struct cell *c, int flag,
+                          int cleanup, int clock);
 void runner_do_drift_part(struct runner *r, struct cell *c, int timer);
 void runner_do_drift_gpart(struct runner *r, struct cell *c, int timer);
+void runner_do_drift_spart(struct runner *r, struct cell *c, int timer);
 void runner_do_kick1(struct runner *r, struct cell *c, int timer);
 void runner_do_kick2(struct runner *r, struct cell *c, int timer);
-void runner_do_end_force(struct runner *r, struct cell *c, int timer);
+void runner_do_end_hydro_force(struct runner *r, struct cell *c, int timer);
 void runner_do_init(struct runner *r, struct cell *c, int timer);
 void runner_do_cooling(struct runner *r, struct cell *c, int timer);
 void runner_do_grav_external(struct runner *r, struct cell *c, int timer);
 void runner_do_grav_fft(struct runner *r, int timer);
+void runner_do_logger(struct runner *r, struct cell *c, int timer);
 void *runner_main(void *data);
 void runner_do_unskip_mapper(void *map_data, int num_elements,
                              void *extra_data);
diff --git a/src/runner_doiact.h b/src/runner_doiact.h
index 942455194da947c3d3ef170f674d9d7b816f9381..854f8b898df93be93bb020a46606a415170fe980 100644
--- a/src/runner_doiact.h
+++ b/src/runner_doiact.h
@@ -145,10 +145,10 @@ void DOPAIR1_NAIVE(struct runner *r, struct cell *restrict ci,
   /* Anything to do here? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
 
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
 
   /* Cosmological terms */
   const float a = cosmo->a;
@@ -168,6 +168,10 @@ void DOPAIR1_NAIVE(struct runner *r, struct cell *restrict ci,
 
     /* Get a hold of the ith part in ci. */
     struct part *restrict pi = &parts_i[pid];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     const int pi_active = part_is_active(pi, e);
     const float hi = pi->h;
     const float hig2 = hi * hi * kernel_gamma2;
@@ -180,6 +184,10 @@ void DOPAIR1_NAIVE(struct runner *r, struct cell *restrict ci,
 
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts_j[pjd];
+
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       const float hj = pj->h;
       const float hjg2 = hj * hj * kernel_gamma2;
       const int pj_active = part_is_active(pj, e);
@@ -205,6 +213,7 @@ void DOPAIR1_NAIVE(struct runner *r, struct cell *restrict ci,
         IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       }
       if (r2 < hjg2 && pj_active) {
@@ -216,6 +225,7 @@ void DOPAIR1_NAIVE(struct runner *r, struct cell *restrict ci,
         IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
       }
 
@@ -245,10 +255,10 @@ void DOPAIR2_NAIVE(struct runner *r, struct cell *restrict ci,
   /* Anything to do here? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
 
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
 
   /* Cosmological terms */
   const float a = cosmo->a;
@@ -268,6 +278,10 @@ void DOPAIR2_NAIVE(struct runner *r, struct cell *restrict ci,
 
     /* Get a hold of the ith part in ci. */
     struct part *restrict pi = &parts_i[pid];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     const int pi_active = part_is_active(pi, e);
     const float hi = pi->h;
     const float hig2 = hi * hi * kernel_gamma2;
@@ -280,6 +294,10 @@ void DOPAIR2_NAIVE(struct runner *r, struct cell *restrict ci,
 
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts_j[pjd];
+
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       const int pj_active = part_is_active(pj, e);
       const float hj = pj->h;
       const float hjg2 = hj * hj * kernel_gamma2;
@@ -307,12 +325,14 @@ void DOPAIR2_NAIVE(struct runner *r, struct cell *restrict ci,
           IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         } else if (pi_active) {
 
           IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         } else if (pj_active) {
 
@@ -323,6 +343,7 @@ void DOPAIR2_NAIVE(struct runner *r, struct cell *restrict ci,
           IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
         }
       }
@@ -354,14 +375,18 @@ void DOSELF1_NAIVE(struct runner *r, struct cell *restrict c) {
   const float a = cosmo->a;
   const float H = cosmo->H;
 
-  const int count = c->count;
-  struct part *restrict parts = c->parts;
+  const int count = c->hydro.count;
+  struct part *restrict parts = c->hydro.parts;
 
   /* Loop over the parts in ci. */
   for (int pid = 0; pid < count; pid++) {
 
     /* Get a hold of the ith part in ci. */
     struct part *restrict pi = &parts[pid];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     const int pi_active = part_is_active(pi, e);
     const float hi = pi->h;
     const float hig2 = hi * hi * kernel_gamma2;
@@ -374,6 +399,10 @@ void DOSELF1_NAIVE(struct runner *r, struct cell *restrict c) {
 
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts[pjd];
+
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       const float hj = pj->h;
       const float hjg2 = hj * hj * kernel_gamma2;
       const int pj_active = part_is_active(pj, e);
@@ -402,12 +431,14 @@ void DOSELF1_NAIVE(struct runner *r, struct cell *restrict c) {
         IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       } else if (doi) {
 
         IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       } else if (doj) {
 
@@ -418,6 +449,7 @@ void DOSELF1_NAIVE(struct runner *r, struct cell *restrict c) {
         IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
       }
     } /* loop over the parts in cj. */
@@ -448,14 +480,18 @@ void DOSELF2_NAIVE(struct runner *r, struct cell *restrict c) {
   const float a = cosmo->a;
   const float H = cosmo->H;
 
-  const int count = c->count;
-  struct part *restrict parts = c->parts;
+  const int count = c->hydro.count;
+  struct part *restrict parts = c->hydro.parts;
 
   /* Loop over the parts in ci. */
   for (int pid = 0; pid < count; pid++) {
 
     /* Get a hold of the ith part in ci. */
     struct part *restrict pi = &parts[pid];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     const int pi_active = part_is_active(pi, e);
     const float hi = pi->h;
     const float hig2 = hi * hi * kernel_gamma2;
@@ -468,6 +504,10 @@ void DOSELF2_NAIVE(struct runner *r, struct cell *restrict c) {
 
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts[pjd];
+
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       const float hj = pj->h;
       const float hjg2 = hj * hj * kernel_gamma2;
       const int pj_active = part_is_active(pj, e);
@@ -496,12 +536,14 @@ void DOSELF2_NAIVE(struct runner *r, struct cell *restrict c) {
         IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       } else if (doi) {
 
         IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       } else if (doj) {
 
@@ -512,6 +554,7 @@ void DOSELF2_NAIVE(struct runner *r, struct cell *restrict c) {
         IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
       }
     } /* loop over the parts in cj. */
@@ -544,8 +587,8 @@ void DOPAIR_SUBSET_NAIVE(struct runner *r, struct cell *restrict ci,
 
   TIMER_TIC;
 
-  const int count_j = cj->count;
-  struct part *restrict parts_j = cj->parts;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_j = cj->hydro.parts;
 
   /* Cosmological terms */
   const float a = cosmo->a;
@@ -572,6 +615,9 @@ void DOPAIR_SUBSET_NAIVE(struct runner *r, struct cell *restrict ci,
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts_j[pjd];
 
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       /* Compute the pairwise distance. */
       float r2 = 0.0f;
       float dx[3];
@@ -594,6 +640,7 @@ void DOPAIR_SUBSET_NAIVE(struct runner *r, struct cell *restrict ci,
         IACT_NONSYM(r2, dx, hi, pj->h, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hi, pj->h, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, pj->h, pi, pj, a, H);
 #endif
       }
     } /* loop over the parts in cj. */
@@ -626,16 +673,16 @@ void DOPAIR_SUBSET(struct runner *r, struct cell *restrict ci,
 
   TIMER_TIC;
 
-  const int count_j = cj->count;
-  struct part *restrict parts_j = cj->parts;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_j = cj->hydro.parts;
 
   /* Cosmological terms */
   const float a = cosmo->a;
   const float H = cosmo->H;
 
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_j = cj->sort[sid];
-  const float dxj = cj->dx_max_sort;
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
+  const float dxj = cj->hydro.dx_max_sort;
 
   /* Parts are on the left? */
   if (!flipped) {
@@ -658,6 +705,10 @@ void DOPAIR_SUBSET(struct runner *r, struct cell *restrict ci,
 
         /* Get a pointer to the jth particle. */
         struct part *restrict pj = &parts_j[sort_j[pjd].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
         const double pjx = pj->x[0];
         const double pjy = pj->x[1];
@@ -682,6 +733,7 @@ void DOPAIR_SUBSET(struct runner *r, struct cell *restrict ci,
           IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         }
       } /* loop over the parts in cj. */
@@ -709,6 +761,10 @@ void DOPAIR_SUBSET(struct runner *r, struct cell *restrict ci,
 
         /* Get a pointer to the jth particle. */
         struct part *restrict pj = &parts_j[sort_j[pjd].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
         const double pjx = pj->x[0];
         const double pjy = pj->x[1];
@@ -733,6 +789,7 @@ void DOPAIR_SUBSET(struct runner *r, struct cell *restrict ci,
           IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         }
       } /* loop over the parts in cj. */
@@ -760,6 +817,9 @@ void DOPAIR_SUBSET_BRANCH(struct runner *r, struct cell *restrict ci,
 
   const struct engine *e = r->e;
 
+  /* Anything to do here? */
+  if (cj->hydro.count == 0) return;
+
   /* Get the relative distance between the pairs, wrapping. */
   double shift[3] = {0.0, 0.0, 0.0};
   for (int k = 0; k < 3; k++) {
@@ -782,8 +842,8 @@ void DOPAIR_SUBSET_BRANCH(struct runner *r, struct cell *restrict ci,
   sid = sortlistID[sid];
 
   /* Has the cell cj been sorted? */
-  if (!(cj->sorted & (1 << sid)) ||
-      cj->dx_max_sort_old > space_maxreldx * cj->dmin)
+  if (!(cj->hydro.sorted & (1 << sid)) ||
+      cj->hydro.dx_max_sort_old > space_maxreldx * cj->dmin)
     error("Interacting unsorted cells.");
 #endif
 
@@ -822,8 +882,8 @@ void DOSELF_SUBSET(struct runner *r, struct cell *restrict ci,
   const float a = cosmo->a;
   const float H = cosmo->H;
 
-  const int count_i = ci->count;
-  struct part *restrict parts_j = ci->parts;
+  const int count_i = ci->hydro.count;
+  struct part *restrict parts_j = ci->hydro.parts;
 
   /* Loop over the parts in ci. */
   for (int pid = 0; pid < count; pid++) {
@@ -845,6 +905,10 @@ void DOSELF_SUBSET(struct runner *r, struct cell *restrict ci,
 
       /* Get a pointer to the jth particle. */
       struct part *restrict pj = &parts_j[pjd];
+
+      /* Skip inhibited particles. */
+      if (part_is_inhibited(pj, e)) continue;
+
       const float hj = pj->h;
 
       /* Compute the pairwise distance. */
@@ -868,6 +932,7 @@ void DOSELF_SUBSET(struct runner *r, struct cell *restrict ci,
         IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
         runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
       }
     } /* loop over the parts in cj. */
@@ -919,29 +984,32 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
   for (int k = 0; k < 3; k++) rshift += shift[k] * runner_shift[sid][k];
 
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_i = ci->sort[sid];
-  const struct entry *restrict sort_j = cj->sort[sid];
+  const struct entry *restrict sort_i = ci->hydro.sort[sid];
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Some constants used to checks that the parts are in the right frame */
   const float shift_threshold_x =
-      2. * ci->width[0] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[0] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_y =
-      2. * ci->width[1] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[1] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_z =
-      2. * ci->width[2] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[2] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
 #endif /* SWIFT_DEBUG_CHECKS */
 
   /* Get some other useful values. */
-  const double hi_max = ci->h_max * kernel_gamma - rshift;
-  const double hj_max = cj->h_max * kernel_gamma;
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const double hi_max = ci->hydro.h_max * kernel_gamma - rshift;
+  const double hj_max = cj->hydro.h_max * kernel_gamma;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
   const double di_max = sort_i[count_i - 1].d - rshift;
   const double dj_min = sort_j[0].d;
-  const float dx_max = (ci->dx_max_sort + cj->dx_max_sort);
+  const float dx_max = (ci->hydro.dx_max_sort + cj->hydro.dx_max_sort);
 
   /* Cosmological terms */
   const float a = cosmo->a;
@@ -975,6 +1043,10 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
         /* Recover pj */
         struct part *pj = &parts_j[sort_j[pjd].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
         const float pjx = pj->x[0] - cj->loc[0];
         const float pjy = pj->x[1] - cj->loc[1];
@@ -1024,6 +1096,7 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
           IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         }
       } /* loop over the parts in cj. */
@@ -1058,6 +1131,10 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
         /* Recover pi */
         struct part *pi = &parts_i[sort_i[pid].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pi, e)) continue;
+
         const float hi = pi->h;
         const float pix = pi->x[0] - (cj->loc[0] + shift[0]);
         const float piy = pi->x[1] - (cj->loc[1] + shift[1]);
@@ -1107,6 +1184,7 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
           IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
         }
       } /* loop over the parts in ci. */
@@ -1129,6 +1207,9 @@ void DOPAIR1_BRANCH(struct runner *r, struct cell *ci, struct cell *cj) {
 
   const struct engine *restrict e = r->e;
 
+  /* Anything to do here? */
+  if (ci->hydro.count == 0 || cj->hydro.count == 0) return;
+
   /* Anything to do here? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
 
@@ -1141,49 +1222,55 @@ void DOPAIR1_BRANCH(struct runner *r, struct cell *ci, struct cell *cj) {
   const int sid = space_getsid(e->s, &ci, &cj, shift);
 
   /* Have the cells been sorted? */
-  if (!(ci->sorted & (1 << sid)) ||
-      ci->dx_max_sort_old > space_maxreldx * ci->dmin)
+  if (!(ci->hydro.sorted & (1 << sid)) ||
+      ci->hydro.dx_max_sort_old > space_maxreldx * ci->dmin)
     error("Interacting unsorted cells.");
-  if (!(cj->sorted & (1 << sid)) ||
-      cj->dx_max_sort_old > space_maxreldx * cj->dmin)
+  if (!(cj->hydro.sorted & (1 << sid)) ||
+      cj->hydro.dx_max_sort_old > space_maxreldx * cj->dmin)
     error("Interacting unsorted cells.");
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_i = ci->sort[sid];
-  const struct entry *restrict sort_j = cj->sort[sid];
+  const struct entry *restrict sort_i = ci->hydro.sort[sid];
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
 
   /* Check that the dx_max_sort values in the cell are indeed an upper
      bound on particle movement. */
-  for (int pid = 0; pid < ci->count; pid++) {
-    const struct part *p = &ci->parts[sort_i[pid].i];
+  for (int pid = 0; pid < ci->hydro.count; pid++) {
+    const struct part *p = &ci->hydro.parts[sort_i[pid].i];
+    if (part_is_inhibited(p, e)) continue;
+
     const float d = p->x[0] * runner_shift[sid][0] +
                     p->x[1] * runner_shift[sid][1] +
                     p->x[2] * runner_shift[sid][2];
-    if (fabsf(d - sort_i[pid].d) - ci->dx_max_sort >
-            1.0e-4 * max(fabsf(d), ci->dx_max_sort_old) &&
-        fabsf(d - sort_i[pid].d) - ci->dx_max_sort > ci->width[0] * 1.0e-10)
+    if (fabsf(d - sort_i[pid].d) - ci->hydro.dx_max_sort >
+            1.0e-4 * max(fabsf(d), ci->hydro.dx_max_sort_old) &&
+        fabsf(d - sort_i[pid].d) - ci->hydro.dx_max_sort >
+            ci->width[0] * 1.0e-10)
       error(
           "particle shift diff exceeds dx_max_sort in cell ci. ci->nodeID=%d "
-          "cj->nodeID=%d d=%e sort_i[pid].d=%e ci->dx_max_sort=%e "
-          "ci->dx_max_sort_old=%e",
-          ci->nodeID, cj->nodeID, d, sort_i[pid].d, ci->dx_max_sort,
-          ci->dx_max_sort_old);
+          "cj->nodeID=%d d=%e sort_i[pid].d=%e ci->hydro.dx_max_sort=%e "
+          "ci->hydro.dx_max_sort_old=%e",
+          ci->nodeID, cj->nodeID, d, sort_i[pid].d, ci->hydro.dx_max_sort,
+          ci->hydro.dx_max_sort_old);
   }
-  for (int pjd = 0; pjd < cj->count; pjd++) {
-    const struct part *p = &cj->parts[sort_j[pjd].i];
+  for (int pjd = 0; pjd < cj->hydro.count; pjd++) {
+    const struct part *p = &cj->hydro.parts[sort_j[pjd].i];
+    if (part_is_inhibited(p, e)) continue;
+
     const float d = p->x[0] * runner_shift[sid][0] +
                     p->x[1] * runner_shift[sid][1] +
                     p->x[2] * runner_shift[sid][2];
-    if ((fabsf(d - sort_j[pjd].d) - cj->dx_max_sort) >
-            1.0e-4 * max(fabsf(d), cj->dx_max_sort_old) &&
-        (fabsf(d - sort_j[pjd].d) - cj->dx_max_sort) > cj->width[0] * 1.0e-10)
+    if ((fabsf(d - sort_j[pjd].d) - cj->hydro.dx_max_sort) >
+            1.0e-4 * max(fabsf(d), cj->hydro.dx_max_sort_old) &&
+        (fabsf(d - sort_j[pjd].d) - cj->hydro.dx_max_sort) >
+            cj->width[0] * 1.0e-10)
       error(
           "particle shift diff exceeds dx_max_sort in cell cj. cj->nodeID=%d "
-          "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->dx_max_sort=%e "
-          "cj->dx_max_sort_old=%e",
-          cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->dx_max_sort,
-          cj->dx_max_sort_old);
+          "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->hydro.dx_max_sort=%e "
+          "cj->hydro.dx_max_sort_old=%e",
+          cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->hydro.dx_max_sort,
+          cj->hydro.dx_max_sort_old);
   }
 #endif /* SWIFT_DEBUG_CHECKS */
 
@@ -1222,33 +1309,36 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
   for (int k = 0; k < 3; k++) rshift += shift[k] * runner_shift[sid][k];
 
   /* Pick-out the sorted lists. */
-  struct entry *restrict sort_i = ci->sort[sid];
-  struct entry *restrict sort_j = cj->sort[sid];
+  struct entry *restrict sort_i = ci->hydro.sort[sid];
+  struct entry *restrict sort_j = cj->hydro.sort[sid];
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Some constants used to checks that the parts are in the right frame */
   const float shift_threshold_x =
-      2. * ci->width[0] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[0] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_y =
-      2. * ci->width[1] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[1] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
   const float shift_threshold_z =
-      2. * ci->width[2] + 2. * max(ci->dx_max_part, cj->dx_max_part);
+      2. * ci->width[2] +
+      2. * max(ci->hydro.dx_max_part, cj->hydro.dx_max_part);
 #endif /* SWIFT_DEBUG_CHECKS */
 
   /* Get some other useful values. */
-  const double hi_max = ci->h_max;
-  const double hj_max = cj->h_max;
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const double hi_max = ci->hydro.h_max;
+  const double hj_max = cj->hydro.h_max;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
 
   /* Cosmological terms */
   const float a = cosmo->a;
   const float H = cosmo->H;
 
   /* Maximal displacement since last rebuild */
-  const double dx_max = (ci->dx_max_sort + cj->dx_max_sort);
+  const double dx_max = (ci->hydro.dx_max_sort + cj->hydro.dx_max_sort);
 
   /* Position on the axis of the particles closest to the interface */
   const double di_max = sort_i[count_i - 1].d;
@@ -1307,6 +1397,10 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
     /* Get a hold of the ith part in ci. */
     struct part *pi = &parts_i[sort_i[pid].i];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     const float hi = pi->h;
 
     /* Is there anything we need to interact with (for this specific hi) ? */
@@ -1337,7 +1431,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
         const float pjz = pj->x[2] - shift_j[2];
 
         /* Compute the pairwise distance. */
-        float dx[3] = {pjx - pix, pjy - piy, pjz - piz};
+        const float dx[3] = {pjx - pix, pjy - piy, pjz - piz};
         const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1380,6 +1474,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
           IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
         }
       } /* loop over the active parts in cj. */
@@ -1392,6 +1487,10 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
         /* Recover pj */
         struct part *pj = &parts_j[sort_j[pjd].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
 
         /* Get the position of pj in the right frame */
@@ -1400,7 +1499,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
         const float pjz = pj->x[2] - shift_j[2];
 
         /* Compute the pairwise distance. */
-        float dx[3] = {pix - pjx, piy - pjy, piz - pjz};
+        const float dx[3] = {pix - pjx, piy - pjy, piz - pjz};
         const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1445,11 +1544,13 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
             IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
           } else {
             IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
           }
         }
@@ -1466,6 +1567,10 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
     /* Get a hold of the jth part in cj. */
     struct part *pj = &parts_j[sort_j[pjd].i];
+
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pj, e)) continue;
+
     const float hj = pj->h;
 
     /* Is there anything we need to interact with (for this specific hj) ? */
@@ -1497,7 +1602,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
         const float piz = pi->x[2] - shift_i[2];
 
         /* Compute the pairwise distance. */
-        float dx[3] = {pix - pjx, piy - pjy, piz - pjz};
+        const float dx[3] = {pix - pjx, piy - pjy, piz - pjz};
         const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1540,6 +1645,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
           IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
         }
       } /* loop over the active parts in ci. */
@@ -1553,6 +1659,10 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
 
         /* Recover pi */
         struct part *pi = &parts_i[sort_i[pid].i];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pi, e)) continue;
+
         const float hi = pi->h;
         const float hig2 = hi * hi * kernel_gamma2;
 
@@ -1562,7 +1672,7 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
         const float piz = pi->x[2] - shift_i[2];
 
         /* Compute the pairwise distance. */
-        float dx[3] = {pjx - pix, pjy - piy, pjz - piz};
+        const float dx[3] = {pjx - pix, pjy - piy, pjz - piz};
         const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1608,11 +1718,13 @@ void DOPAIR2(struct runner *r, struct cell *ci, struct cell *cj, const int sid,
             IACT(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+            runner_iact_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
           } else {
             IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+            runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
           }
         }
@@ -1642,6 +1754,9 @@ void DOPAIR2_BRANCH(struct runner *r, struct cell *ci, struct cell *cj) {
 
   const struct engine *restrict e = r->e;
 
+  /* Anything to do here? */
+  if (ci->hydro.count == 0 || cj->hydro.count == 0) return;
+
   /* Anything to do here? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
 
@@ -1654,49 +1769,55 @@ void DOPAIR2_BRANCH(struct runner *r, struct cell *ci, struct cell *cj) {
   const int sid = space_getsid(e->s, &ci, &cj, shift);
 
   /* Have the cells been sorted? */
-  if (!(ci->sorted & (1 << sid)) ||
-      ci->dx_max_sort_old > space_maxreldx * ci->dmin)
+  if (!(ci->hydro.sorted & (1 << sid)) ||
+      ci->hydro.dx_max_sort_old > space_maxreldx * ci->dmin)
     error("Interacting unsorted cells.");
-  if (!(cj->sorted & (1 << sid)) ||
-      cj->dx_max_sort_old > space_maxreldx * cj->dmin)
+  if (!(cj->hydro.sorted & (1 << sid)) ||
+      cj->hydro.dx_max_sort_old > space_maxreldx * cj->dmin)
     error("Interacting unsorted cells.");
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_i = ci->sort[sid];
-  const struct entry *restrict sort_j = cj->sort[sid];
+  const struct entry *restrict sort_i = ci->hydro.sort[sid];
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
 
   /* Check that the dx_max_sort values in the cell are indeed an upper
      bound on particle movement. */
-  for (int pid = 0; pid < ci->count; pid++) {
-    const struct part *p = &ci->parts[sort_i[pid].i];
+  for (int pid = 0; pid < ci->hydro.count; pid++) {
+    const struct part *p = &ci->hydro.parts[sort_i[pid].i];
+    if (part_is_inhibited(p, e)) continue;
+
     const float d = p->x[0] * runner_shift[sid][0] +
                     p->x[1] * runner_shift[sid][1] +
                     p->x[2] * runner_shift[sid][2];
-    if (fabsf(d - sort_i[pid].d) - ci->dx_max_sort >
-            1.0e-4 * max(fabsf(d), ci->dx_max_sort_old) &&
-        fabsf(d - sort_i[pid].d) - ci->dx_max_sort > ci->width[0] * 1.0e-10)
+    if (fabsf(d - sort_i[pid].d) - ci->hydro.dx_max_sort >
+            1.0e-4 * max(fabsf(d), ci->hydro.dx_max_sort_old) &&
+        fabsf(d - sort_i[pid].d) - ci->hydro.dx_max_sort >
+            ci->width[0] * 1.0e-10)
       error(
           "particle shift diff exceeds dx_max_sort in cell ci. ci->nodeID=%d "
-          "cj->nodeID=%d d=%e sort_i[pid].d=%e ci->dx_max_sort=%e "
-          "ci->dx_max_sort_old=%e",
-          ci->nodeID, cj->nodeID, d, sort_i[pid].d, ci->dx_max_sort,
-          ci->dx_max_sort_old);
+          "cj->nodeID=%d d=%e sort_i[pid].d=%e ci->hydro.dx_max_sort=%e "
+          "ci->hydro.dx_max_sort_old=%e",
+          ci->nodeID, cj->nodeID, d, sort_i[pid].d, ci->hydro.dx_max_sort,
+          ci->hydro.dx_max_sort_old);
   }
-  for (int pjd = 0; pjd < cj->count; pjd++) {
-    const struct part *p = &cj->parts[sort_j[pjd].i];
+  for (int pjd = 0; pjd < cj->hydro.count; pjd++) {
+    const struct part *p = &cj->hydro.parts[sort_j[pjd].i];
+    if (part_is_inhibited(p, e)) continue;
+
     const float d = p->x[0] * runner_shift[sid][0] +
                     p->x[1] * runner_shift[sid][1] +
                     p->x[2] * runner_shift[sid][2];
-    if (fabsf(d - sort_j[pjd].d) - cj->dx_max_sort >
-            1.0e-4 * max(fabsf(d), cj->dx_max_sort_old) &&
-        fabsf(d - sort_j[pjd].d) - cj->dx_max_sort > cj->width[0] * 1.0e-10)
+    if (fabsf(d - sort_j[pjd].d) - cj->hydro.dx_max_sort >
+            1.0e-4 * max(fabsf(d), cj->hydro.dx_max_sort_old) &&
+        fabsf(d - sort_j[pjd].d) - cj->hydro.dx_max_sort >
+            cj->width[0] * 1.0e-10)
       error(
           "particle shift diff exceeds dx_max_sort in cell cj. cj->nodeID=%d "
-          "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->dx_max_sort=%e "
-          "cj->dx_max_sort_old=%e",
-          cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->dx_max_sort,
-          cj->dx_max_sort_old);
+          "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->hydro.dx_max_sort=%e "
+          "cj->hydro.dx_max_sort_old=%e",
+          cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->hydro.dx_max_sort,
+          cj->hydro.dx_max_sort_old);
   }
 #endif /* SWIFT_DEBUG_CHECKS */
 
@@ -1726,8 +1847,8 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
 
   TIMER_TIC;
 
-  struct part *restrict parts = c->parts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  const int count = c->hydro.count;
 
   /* Set up indt. */
   int *indt = NULL;
@@ -1751,6 +1872,9 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
     /* Get a pointer to the ith particle. */
     struct part *restrict pi = &parts[pid];
 
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     /* Get the particle position and radius. */
     double pix[3];
     for (int k = 0; k < 3; k++) pix[k] = pi->x[k];
@@ -1789,6 +1913,7 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
           IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
         }
       } /* loop over all other particles. */
@@ -1805,6 +1930,10 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
 
         /* Get a pointer to the jth particle. */
         struct part *restrict pj = &parts[pjd];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
 
         /* Compute the pairwise distance. */
@@ -1817,6 +1946,8 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
         const int doj =
             (part_is_active(pj, e)) && (r2 < hj * hj * kernel_gamma2);
 
+        const int doi = (r2 < hig2);
+
 #ifdef SWIFT_DEBUG_CHECKS
         /* Check that particles have been drifted to the current time */
         if (pi->ti_drift != e->ti_current)
@@ -1826,26 +1957,32 @@ void DOSELF1(struct runner *r, struct cell *restrict c) {
 #endif
 
         /* Hit or miss? */
-        if (r2 < hig2 || doj) {
+        if (doi || doj) {
 
           /* Which parts need to be updated? */
-          if (r2 < hig2 && doj) {
+          if (doi && doj) {
+
             IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
-          } else if (!doj) {
+          } else if (doi) {
+
             IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
-          } else {
+          } else if (doj) {
+
             dx[0] = -dx[0];
             dx[1] = -dx[1];
             dx[2] = -dx[2];
             IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+            runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
           }
         }
@@ -1870,11 +2007,14 @@ void DOSELF1_BRANCH(struct runner *r, struct cell *c) {
 
   const struct engine *restrict e = r->e;
 
+  /* Anything to do here? */
+  if (c->hydro.count == 0) return;
+
   /* Anything to do here? */
   if (!cell_is_active_hydro(c, e)) return;
 
   /* Did we mess up the recursion? */
-  if (c->h_max_old * kernel_gamma > c->dmin)
+  if (c->hydro.h_max_old * kernel_gamma > c->dmin)
     error("Cell smaller than smoothing length");
 
   /* Check that cells are drifted. */
@@ -1903,8 +2043,8 @@ void DOSELF2(struct runner *r, struct cell *restrict c) {
 
   TIMER_TIC;
 
-  struct part *restrict parts = c->parts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  const int count = c->hydro.count;
 
   /* Set up indt. */
   int *indt = NULL;
@@ -1928,6 +2068,9 @@ void DOSELF2(struct runner *r, struct cell *restrict c) {
     /* Get a pointer to the ith particle. */
     struct part *restrict pi = &parts[pid];
 
+    /* Skip inhibited particles. */
+    if (part_is_inhibited(pi, e)) continue;
+
     /* Get the particle position and radius. */
     double pix[3];
     for (int k = 0; k < 3; k++) pix[k] = pi->x[k];
@@ -1966,6 +2109,7 @@ void DOSELF2(struct runner *r, struct cell *restrict c) {
           IACT_NONSYM(r2, dx, hj, hi, pj, pi, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
           runner_iact_nonsym_chemistry(r2, dx, hj, hi, pj, pi, a, H);
+          runner_iact_nonsym_star_formation(r2, dx, hj, hi, pj, pi, a, H);
 #endif
         }
       } /* loop over all other particles. */
@@ -1982,6 +2126,10 @@ void DOSELF2(struct runner *r, struct cell *restrict c) {
 
         /* Get a pointer to the jth particle. */
         struct part *restrict pj = &parts[pjd];
+
+        /* Skip inhibited particles. */
+        if (part_is_inhibited(pj, e)) continue;
+
         const float hj = pj->h;
 
         /* Compute the pairwise distance. */
@@ -2008,11 +2156,13 @@ void DOSELF2(struct runner *r, struct cell *restrict c) {
             IACT(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
           } else {
             IACT_NONSYM(r2, dx, hi, hj, pi, pj, a, H);
 #if (FUNCTION_TASK_LOOP == TASK_LOOP_DENSITY)
             runner_iact_nonsym_chemistry(r2, dx, hi, hj, pi, pj, a, H);
+            runner_iact_nonsym_star_formation(r2, dx, hi, hj, pi, pj, a, H);
 #endif
           }
         }
@@ -2037,11 +2187,14 @@ void DOSELF2_BRANCH(struct runner *r, struct cell *c) {
 
   const struct engine *restrict e = r->e;
 
+  /* Anything to do here? */
+  if (c->hydro.count == 0) return;
+
   /* Anything to do here? */
   if (!cell_is_active_hydro(c, e)) return;
 
   /* Did we mess up the recursion? */
-  if (c->h_max_old * kernel_gamma > c->dmin)
+  if (c->hydro.h_max_old * kernel_gamma > c->dmin)
     error("Cell smaller than smoothing length");
 
   /* Check that cells are drifted. */
@@ -2079,7 +2232,7 @@ void DOSUB_PAIR1(struct runner *r, struct cell *ci, struct cell *cj, int sid,
 
   /* Should we even bother? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
-  if (ci->count == 0 || cj->count == 0) return;
+  if (ci->hydro.count == 0 || cj->hydro.count == 0) return;
 
   /* Get the type of pair if not specified explicitly. */
   double shift[3];
@@ -2295,18 +2448,18 @@ void DOSUB_PAIR1(struct runner *r, struct cell *ci, struct cell *cj, int sid,
       error("Interacting undrifted cells.");
 
     /* Do any of the cells need to be sorted first? */
-    if (!(ci->sorted & (1 << sid)) ||
-        ci->dx_max_sort_old > ci->dmin * space_maxreldx)
+    if (!(ci->hydro.sorted & (1 << sid)) ||
+        ci->hydro.dx_max_sort_old > ci->dmin * space_maxreldx)
       error(
-          "Interacting unsorted cell. ci->dx_max_sort_old=%e ci->dmin=%e "
+          "Interacting unsorted cell. ci->hydro.dx_max_sort_old=%e ci->dmin=%e "
           "ci->sorted=%d sid=%d",
-          ci->dx_max_sort_old, ci->dmin, ci->sorted, sid);
-    if (!(cj->sorted & (1 << sid)) ||
-        cj->dx_max_sort_old > cj->dmin * space_maxreldx)
+          ci->hydro.dx_max_sort_old, ci->dmin, ci->hydro.sorted, sid);
+    if (!(cj->hydro.sorted & (1 << sid)) ||
+        cj->hydro.dx_max_sort_old > cj->dmin * space_maxreldx)
       error(
-          "Interacting unsorted cell. cj->dx_max_sort_old=%e cj->dmin=%e "
+          "Interacting unsorted cell. cj->hydro.dx_max_sort_old=%e cj->dmin=%e "
           "cj->sorted=%d sid=%d",
-          cj->dx_max_sort_old, cj->dmin, cj->sorted, sid);
+          cj->hydro.dx_max_sort_old, cj->dmin, cj->hydro.sorted, sid);
 
     /* Compute the interactions. */
     DOPAIR1_BRANCH(r, ci, cj);
@@ -2327,7 +2480,7 @@ void DOSUB_SELF1(struct runner *r, struct cell *ci, int gettimer) {
   TIMER_TIC;
 
   /* Should we even bother? */
-  if (ci->count == 0 || !cell_is_active_hydro(ci, r->e)) return;
+  if (ci->hydro.count == 0 || !cell_is_active_hydro(ci, r->e)) return;
 
   /* Recurse? */
   if (cell_can_recurse_in_self_hydro_task(ci)) {
@@ -2376,7 +2529,7 @@ void DOSUB_PAIR2(struct runner *r, struct cell *ci, struct cell *cj, int sid,
 
   /* Should we even bother? */
   if (!cell_is_active_hydro(ci, e) && !cell_is_active_hydro(cj, e)) return;
-  if (ci->count == 0 || cj->count == 0) return;
+  if (ci->hydro.count == 0 || cj->hydro.count == 0) return;
 
   /* Get the type of pair if not specified explicitly. */
   double shift[3];
@@ -2592,18 +2745,18 @@ void DOSUB_PAIR2(struct runner *r, struct cell *ci, struct cell *cj, int sid,
       error("Interacting undrifted cells.");
 
     /* Do any of the cells need to be sorted first? */
-    if (!(ci->sorted & (1 << sid)) ||
-        ci->dx_max_sort_old > ci->dmin * space_maxreldx)
+    if (!(ci->hydro.sorted & (1 << sid)) ||
+        ci->hydro.dx_max_sort_old > ci->dmin * space_maxreldx)
       error(
-          "Interacting unsorted cell. ci->dx_max_sort_old=%e ci->dmin=%e "
+          "Interacting unsorted cell. ci->hydro.dx_max_sort_old=%e ci->dmin=%e "
           "ci->sorted=%d sid=%d",
-          ci->dx_max_sort_old, ci->dmin, ci->sorted, sid);
-    if (!(cj->sorted & (1 << sid)) ||
-        cj->dx_max_sort_old > cj->dmin * space_maxreldx)
+          ci->hydro.dx_max_sort_old, ci->dmin, ci->hydro.sorted, sid);
+    if (!(cj->hydro.sorted & (1 << sid)) ||
+        cj->hydro.dx_max_sort_old > cj->dmin * space_maxreldx)
       error(
-          "Interacting unsorted cell. cj->dx_max_sort_old=%e cj->dmin=%e "
+          "Interacting unsorted cell. cj->hydro.dx_max_sort_old=%e cj->dmin=%e "
           "cj->sorted=%d sid=%d",
-          cj->dx_max_sort_old, cj->dmin, cj->sorted, sid);
+          cj->hydro.dx_max_sort_old, cj->dmin, cj->hydro.sorted, sid);
 
     /* Compute the interactions. */
     DOPAIR2_BRANCH(r, ci, cj);
@@ -2624,7 +2777,7 @@ void DOSUB_SELF2(struct runner *r, struct cell *ci, int gettimer) {
   TIMER_TIC;
 
   /* Should we even bother? */
-  if (ci->count == 0 || !cell_is_active_hydro(ci, r->e)) return;
+  if (ci->hydro.count == 0 || !cell_is_active_hydro(ci, r->e)) return;
 
   /* Recurse? */
   if (cell_can_recurse_in_self_hydro_task(ci)) {
@@ -2659,15 +2812,16 @@ void DOSUB_SUBSET(struct runner *r, struct cell *ci, struct part *parts,
   if (!cell_is_active_hydro(ci, e) &&
       (cj == NULL || !cell_is_active_hydro(cj, e)))
     return;
-  if (ci->count == 0 || (cj != NULL && cj->count == 0)) return;
+  if (ci->hydro.count == 0 || (cj != NULL && cj->hydro.count == 0)) return;
 
   /* Find out in which sub-cell of ci the parts are. */
   struct cell *sub = NULL;
   if (ci->split) {
     for (int k = 0; k < 8; k++) {
       if (ci->progeny[k] != NULL) {
-        if (&parts[ind[0]] >= &ci->progeny[k]->parts[0] &&
-            &parts[ind[0]] < &ci->progeny[k]->parts[ci->progeny[k]->count]) {
+        if (&parts[ind[0]] >= &ci->progeny[k]->hydro.parts[0] &&
+            &parts[ind[0]] <
+                &ci->progeny[k]->hydro.parts[ci->progeny[k]->hydro.count]) {
           sub = ci->progeny[k];
           break;
         }
diff --git a/src/runner_doiact_grav.h b/src/runner_doiact_grav.h
index 240f812984362fcd4130f2465ff1b176bb6fb067..c6885746a29fd7b6bd828496316f8dad01c1b7da 100644
--- a/src/runner_doiact_grav.h
+++ b/src/runner_doiact_grav.h
@@ -48,8 +48,9 @@ static INLINE void runner_do_grav_down(struct runner *r, struct cell *c,
   TIMER_TIC;
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->ti_old_multipole != e->ti_current) error("c->multipole not drifted.");
-  if (c->multipole->pot.ti_init != e->ti_current)
+  if (c->grav.ti_old_multipole != e->ti_current)
+    error("c->multipole not drifted.");
+  if (c->grav.multipole->pot.ti_init != e->ti_current)
     error("c->field tensor not initialised");
 #endif
 
@@ -65,22 +66,22 @@ static INLINE void runner_do_grav_down(struct runner *r, struct cell *c,
       if (cp != NULL && cell_is_active_gravity(cp, e)) {
 
 #ifdef SWIFT_DEBUG_CHECKS
-        if (cp->ti_old_multipole != e->ti_current)
+        if (cp->grav.ti_old_multipole != e->ti_current)
           error("cp->multipole not drifted.");
-        if (cp->multipole->pot.ti_init != e->ti_current)
+        if (cp->grav.multipole->pot.ti_init != e->ti_current)
           error("cp->field tensor not initialised");
 #endif
         /* If the tensor received any contribution, push it down */
-        if (c->multipole->pot.interacted) {
+        if (c->grav.multipole->pot.interacted) {
 
           struct grav_tensor shifted_tensor;
 
           /* Shift the field tensor */
-          gravity_L2L(&shifted_tensor, &c->multipole->pot, cp->multipole->CoM,
-                      c->multipole->CoM);
+          gravity_L2L(&shifted_tensor, &c->grav.multipole->pot,
+                      cp->grav.multipole->CoM, c->grav.multipole->CoM);
 
           /* Add it to this level's tensor */
-          gravity_field_tensors_add(&cp->multipole->pot, &shifted_tensor);
+          gravity_field_tensors_add(&cp->grav.multipole->pot, &shifted_tensor);
         }
 
         /* Recurse */
@@ -93,16 +94,16 @@ static INLINE void runner_do_grav_down(struct runner *r, struct cell *c,
     /* Leaf case */
 
     /* We can abort early if no interactions via multipole happened */
-    if (!c->multipole->pot.interacted) return;
+    if (!c->grav.multipole->pot.interacted) return;
 
     if (!cell_are_gpart_drifted(c, e)) error("Un-drifted gparts");
 
     /* Cell properties */
-    struct gpart *gparts = c->gparts;
-    const int gcount = c->gcount;
-    const struct grav_tensor *pot = &c->multipole->pot;
-    const double CoM[3] = {c->multipole->CoM[0], c->multipole->CoM[1],
-                           c->multipole->CoM[2]};
+    struct gpart *gparts = c->grav.parts;
+    const int gcount = c->grav.count;
+    const struct grav_tensor *pot = &c->grav.multipole->pot;
+    const double CoM[3] = {c->grav.multipole->CoM[0], c->grav.multipole->CoM[1],
+                           c->grav.multipole->CoM[2]};
 
     /* Apply accelerations to the particles */
     for (int i = 0; i < gcount; ++i) {
@@ -117,8 +118,15 @@ static INLINE void runner_do_grav_down(struct runner *r, struct cell *c,
         /* Check that particles have been drifted to the current time */
         if (gp->ti_drift != e->ti_current)
           error("gpart not drifted to current time");
-        if (c->multipole->pot.ti_init != e->ti_current)
+        if (c->grav.multipole->pot.ti_init != e->ti_current)
           error("c->field tensor not initialised");
+
+        /* Check that we are not updating an inhibited particle */
+        if (gpart_is_inhibited(gp, e)) error("Updating an inhibited particle!");
+
+        /* Check that the particle was initialised */
+        if (gp->initialised == 0)
+          error("Adding forces to an un-initialised gpart.");
 #endif
         /* Apply the kernel */
         gravity_L2P(pot, CoM, gp);
@@ -221,8 +229,22 @@ static INLINE void runner_dopair_grav_pp_full(
       /* Check that particles have been drifted to the current time */
       if (gparts_i[pid].ti_drift != e->ti_current)
         error("gpi not drifted to current time");
-      if (pjd < gcount_j && gparts_j[pjd].ti_drift != e->ti_current)
+      if (pjd < gcount_j && gparts_j[pjd].ti_drift != e->ti_current &&
+          !gpart_is_inhibited(&gparts_j[pjd], e))
         error("gpj not drifted to current time");
+
+      /* Check that we are not updating an inhibited particle */
+      if (gpart_is_inhibited(&gparts_i[pid], e))
+        error("Updating an inhibited particle!");
+
+      /* Check that the particle we interact with was not inhibited */
+      if (pjd < gcount_j && gpart_is_inhibited(&gparts_j[pjd], e) &&
+          mass_j != 0.f)
+        error("Inhibited particle used as gravity source.");
+
+      /* Check that the particle was initialised */
+      if (gparts_i[pid].initialised == 0)
+        error("Adding forces to an un-initialised gpart.");
 #endif
 
       /* Interact! */
@@ -238,7 +260,8 @@ static INLINE void runner_dopair_grav_pp_full(
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Update the interaction counter if it's not a padded gpart */
-      if (pjd < gcount_j) gparts_i[pid].num_interacted++;
+      if (pjd < gcount_j && !gpart_is_inhibited(&gparts_j[pjd], e))
+        gparts_i[pid].num_interacted++;
 #endif
     }
 
@@ -347,8 +370,22 @@ static INLINE void runner_dopair_grav_pp_truncated(
       /* Check that particles have been drifted to the current time */
       if (gparts_i[pid].ti_drift != e->ti_current)
         error("gpi not drifted to current time");
-      if (pjd < gcount_j && gparts_j[pjd].ti_drift != e->ti_current)
+      if (pjd < gcount_j && gparts_j[pjd].ti_drift != e->ti_current &&
+          !gpart_is_inhibited(&gparts_j[pjd], e))
         error("gpj not drifted to current time");
+
+      /* Check that we are not updating an inhibited particle */
+      if (gpart_is_inhibited(&gparts_i[pid], e))
+        error("Updating an inhibited particle!");
+
+      /* Check that the particle we interact with was not inhibited */
+      if (pjd < gcount_j && gpart_is_inhibited(&gparts_j[pjd], e) &&
+          mass_j != 0.f)
+        error("Inhibited particle used as gravity source.");
+
+      /* Check that the particle was initialised */
+      if (gparts_i[pid].initialised == 0)
+        error("Adding forces to an un-initialised gpart.");
 #endif
 
       /* Interact! */
@@ -364,7 +401,8 @@ static INLINE void runner_dopair_grav_pp_truncated(
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Update the interaction counter if it's not a padded gpart */
-      if (pjd < gcount_j) gparts_i[pid].num_interacted++;
+      if (pjd < gcount_j && !gpart_is_inhibited(&gparts_j[pjd], e))
+        gparts_i[pid].num_interacted++;
 #endif
     }
 
@@ -433,6 +471,18 @@ static INLINE void runner_dopair_grav_pm_full(
     if (pid < gcount_i && !gpart_is_active(&gparts_i[pid], e))
       error("Active particle went through the cache");
 
+    /* Check that particles have been drifted to the current time */
+    if (gparts_i[pid].ti_drift != e->ti_current)
+      error("gpi not drifted to current time");
+
+    /* Check that we are not updating an inhibited particle */
+    if (gpart_is_inhibited(&gparts_i[pid], e))
+      error("Updating an inhibited particle!");
+
+    /* Check that the particle was initialised */
+    if (gparts_i[pid].initialised == 0)
+      error("Adding forces to an un-initialised gpart.");
+
     if (pid >= gcount_i) error("Adding forces to padded particle");
 #endif
 
@@ -458,13 +508,13 @@ static INLINE void runner_dopair_grav_pm_full(
 
     const float r2 = dx * dx + dy * dy + dz * dz;
 
-#ifdef SWIFT_DEBUG_CHECKSa
-    const float r_max_j = cj->multipole->r_max;
+#ifdef SWIFT_DEBUG_CHECKS
+    const float r_max_j = cj->grav.multipole->r_max;
     const float r_max2 = r_max_j * r_max_j;
     const float theta_crit2 = e->gravity_properties->theta_crit2;
 
-    /* 1.01 to avoid FP rounding false-positives */
-    if (!gravity_M2P_accept(r_max2, theta_crit2 * 1.01, r2))
+    /* Note: 1.1 to avoid FP rounding false-positives */
+    if (!gravity_M2P_accept(r_max2, theta_crit2 * 1.1, r2))
       error(
           "use_mpole[i] set when M2P accept fails CoM=[%e %e %e] pos=[%e %e "
           "%e], rmax=%e",
@@ -485,7 +535,7 @@ static INLINE void runner_dopair_grav_pm_full(
 #ifdef SWIFT_DEBUG_CHECKS
     /* Update the interaction counter */
     if (pid < gcount_i)
-      gparts_i[pid].num_interacted += cj->multipole->m_pole.num_gpart;
+      gparts_i[pid].num_interacted += cj->grav.multipole->m_pole.num_gpart;
 #endif
   }
 }
@@ -554,6 +604,18 @@ static INLINE void runner_dopair_grav_pm_truncated(
     if (pid < gcount_i && !gpart_is_active(&gparts_i[pid], e))
       error("Active particle went through the cache");
 
+    /* Check that particles have been drifted to the current time */
+    if (gparts_i[pid].ti_drift != e->ti_current)
+      error("gpi not drifted to current time");
+
+    /* Check that we are not updating an inhibited particle */
+    if (gpart_is_inhibited(&gparts_i[pid], e))
+      error("Updating an inhibited particle!");
+
+    /* Check that the particle was initialised */
+    if (gparts_i[pid].initialised == 0)
+      error("Adding forces to an un-initialised gpart.");
+
     if (pid >= gcount_i) error("Adding forces to padded particle");
 #endif
 
@@ -578,12 +640,12 @@ static INLINE void runner_dopair_grav_pm_truncated(
     const float r2 = dx * dx + dy * dy + dz * dz;
 
 #ifdef SWIFT_DEBUG_CHECKS
-    const float r_max_j = cj->multipole->r_max;
+    const float r_max_j = cj->grav.multipole->r_max;
     const float r_max2 = r_max_j * r_max_j;
     const float theta_crit2 = e->gravity_properties->theta_crit2;
 
-    /* 1.01 to avoid FP rounding false-positives */
-    if (!gravity_M2P_accept(r_max2, theta_crit2 * 1.01, r2))
+    /* 1.1 to avoid FP rounding false-positives */
+    if (!gravity_M2P_accept(r_max2, theta_crit2 * 1.1, r2))
       error(
           "use_mpole[i] set when M2P accept fails CoM=[%e %e %e] pos=[%e %e "
           "%e], rmax=%e",
@@ -604,7 +666,7 @@ static INLINE void runner_dopair_grav_pm_truncated(
 #ifdef SWIFT_DEBUG_CHECKS
     /* Update the interaction counter */
     if (pid < gcount_i)
-      gparts_i[pid].num_interacted += cj->multipole->m_pole.num_gpart;
+      gparts_i[pid].num_interacted += cj->grav.multipole->m_pole.num_gpart;
 #endif
   }
 }
@@ -635,15 +697,18 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
   /* Recover some useful constants */
   const struct engine *e = r->e;
   const int periodic = e->mesh->periodic;
-  const float dim[3] = {e->mesh->dim[0], e->mesh->dim[1], e->mesh->dim[2]};
+  const float dim[3] = {(float)e->mesh->dim[0], (float)e->mesh->dim[1],
+                        (float)e->mesh->dim[2]};
   const float r_s_inv = e->mesh->r_s_inv;
   const double min_trunc = e->mesh->r_cut_min;
 
   TIMER_TIC;
 
   /* Record activity status */
-  const int ci_active = cell_is_active_gravity(ci, e);
-  const int cj_active = cell_is_active_gravity(cj, e);
+  const int ci_active =
+      cell_is_active_gravity(ci, e) && (ci->nodeID == e->nodeID);
+  const int cj_active =
+      cell_is_active_gravity(cj, e) && (cj->nodeID == e->nodeID);
 
   /* Anything to do here? */
   if (!ci_active && !cj_active) return;
@@ -655,9 +720,9 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
   /* Let's start by checking things are drifted */
   if (!cell_are_gpart_drifted(ci, e)) error("Un-drifted gparts");
   if (!cell_are_gpart_drifted(cj, e)) error("Un-drifted gparts");
-  if (cj_active && ci->ti_old_multipole != e->ti_current)
+  if (cj_active && ci->grav.ti_old_multipole != e->ti_current)
     error("Un-drifted multipole");
-  if (ci_active && cj->ti_old_multipole != e->ti_current)
+  if (ci_active && cj->grav.ti_old_multipole != e->ti_current)
     error("Un-drifted multipole");
 
   /* Caches to play with */
@@ -669,24 +734,24 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
   const double shift_j[3] = {0., 0., 0.};
 
   /* Recover the multipole info and shift the CoM locations */
-  const float rmax_i = ci->multipole->r_max;
-  const float rmax_j = cj->multipole->r_max;
+  const float rmax_i = ci->grav.multipole->r_max;
+  const float rmax_j = cj->grav.multipole->r_max;
   const float rmax2_i = rmax_i * rmax_i;
   const float rmax2_j = rmax_j * rmax_j;
-  const struct multipole *multi_i = &ci->multipole->m_pole;
-  const struct multipole *multi_j = &cj->multipole->m_pole;
-  const float CoM_i[3] = {(float)(ci->multipole->CoM[0] - shift_i[0]),
-                          (float)(ci->multipole->CoM[1] - shift_i[1]),
-                          (float)(ci->multipole->CoM[2] - shift_i[2])};
-  const float CoM_j[3] = {(float)(cj->multipole->CoM[0] - shift_j[0]),
-                          (float)(cj->multipole->CoM[1] - shift_j[1]),
-                          (float)(cj->multipole->CoM[2] - shift_j[2])};
+  const struct multipole *multi_i = &ci->grav.multipole->m_pole;
+  const struct multipole *multi_j = &cj->grav.multipole->m_pole;
+  const float CoM_i[3] = {(float)(ci->grav.multipole->CoM[0] - shift_i[0]),
+                          (float)(ci->grav.multipole->CoM[1] - shift_i[1]),
+                          (float)(ci->grav.multipole->CoM[2] - shift_i[2])};
+  const float CoM_j[3] = {(float)(cj->grav.multipole->CoM[0] - shift_j[0]),
+                          (float)(cj->grav.multipole->CoM[1] - shift_j[1]),
+                          (float)(cj->grav.multipole->CoM[2] - shift_j[2])};
 
   /* Start by constructing particle caches */
 
   /* Computed the padded counts */
-  const int gcount_i = ci->gcount;
-  const int gcount_j = cj->gcount;
+  const int gcount_i = ci->grav.count;
+  const int gcount_j = cj->grav.count;
   const int gcount_padded_i = gcount_i - (gcount_i % VEC_SIZE) + VEC_SIZE;
   const int gcount_padded_j = gcount_j - (gcount_j % VEC_SIZE) + VEC_SIZE;
 
@@ -699,10 +764,10 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
 
   /* Fill the caches */
   gravity_cache_populate(e->max_active_bin, allow_mpole, periodic, dim,
-                         ci_cache, ci->gparts, gcount_i, gcount_padded_i,
+                         ci_cache, ci->grav.parts, gcount_i, gcount_padded_i,
                          shift_i, CoM_j, rmax2_j, ci, e->gravity_properties);
   gravity_cache_populate(e->max_active_bin, allow_mpole, periodic, dim,
-                         cj_cache, cj->gparts, gcount_j, gcount_padded_j,
+                         cj_cache, cj->grav.parts, gcount_j, gcount_padded_j,
                          shift_j, CoM_i, rmax2_i, cj, e->gravity_properties);
 
   /* Can we use the Newtonian version or do we need the truncated one ? */
@@ -715,25 +780,27 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
 
       /* First the P2P */
       runner_dopair_grav_pp_full(ci_cache, cj_cache, gcount_i, gcount_j,
-                                 gcount_padded_j, periodic, dim, e, ci->gparts,
-                                 cj->gparts);
+                                 gcount_padded_j, periodic, dim, e,
+                                 ci->grav.parts, cj->grav.parts);
 
       /* Then the M2P */
       if (allow_mpole)
         runner_dopair_grav_pm_full(ci_cache, gcount_padded_i, CoM_j, multi_j,
-                                   periodic, dim, e, ci->gparts, gcount_i, cj);
+                                   periodic, dim, e, ci->grav.parts, gcount_i,
+                                   cj);
     }
     if (cj_active && symmetric) {
 
       /* First the P2P */
       runner_dopair_grav_pp_full(cj_cache, ci_cache, gcount_j, gcount_i,
-                                 gcount_padded_i, periodic, dim, e, cj->gparts,
-                                 ci->gparts);
+                                 gcount_padded_i, periodic, dim, e,
+                                 cj->grav.parts, ci->grav.parts);
 
       /* Then the M2P */
       if (allow_mpole)
         runner_dopair_grav_pm_full(cj_cache, gcount_padded_j, CoM_i, multi_i,
-                                   periodic, dim, e, cj->gparts, gcount_j, ci);
+                                   periodic, dim, e, cj->grav.parts, gcount_j,
+                                   ci);
     }
 
   } else { /* Periodic BC */
@@ -757,26 +824,26 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
         /* First the (truncated) P2P */
         runner_dopair_grav_pp_truncated(ci_cache, cj_cache, gcount_i, gcount_j,
                                         gcount_padded_j, dim, r_s_inv, e,
-                                        ci->gparts, cj->gparts);
+                                        ci->grav.parts, cj->grav.parts);
 
         /* Then the M2P */
         if (allow_mpole)
           runner_dopair_grav_pm_truncated(ci_cache, gcount_padded_i, CoM_j,
-                                          multi_j, dim, r_s_inv, e, ci->gparts,
-                                          gcount_i, cj);
+                                          multi_j, dim, r_s_inv, e,
+                                          ci->grav.parts, gcount_i, cj);
       }
       if (cj_active && symmetric) {
 
         /* First the (truncated) P2P */
         runner_dopair_grav_pp_truncated(cj_cache, ci_cache, gcount_j, gcount_i,
                                         gcount_padded_i, dim, r_s_inv, e,
-                                        cj->gparts, ci->gparts);
+                                        cj->grav.parts, ci->grav.parts);
 
         /* Then the M2P */
         if (allow_mpole)
           runner_dopair_grav_pm_truncated(cj_cache, gcount_padded_j, CoM_i,
-                                          multi_i, dim, r_s_inv, e, cj->gparts,
-                                          gcount_j, ci);
+                                          multi_i, dim, r_s_inv, e,
+                                          cj->grav.parts, gcount_j, ci);
       }
 
     } else {
@@ -789,12 +856,12 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
         /* First the (Newtonian) P2P */
         runner_dopair_grav_pp_full(ci_cache, cj_cache, gcount_i, gcount_j,
                                    gcount_padded_j, periodic, dim, e,
-                                   ci->gparts, cj->gparts);
+                                   ci->grav.parts, cj->grav.parts);
 
         /* Then the M2P */
         if (allow_mpole)
           runner_dopair_grav_pm_full(ci_cache, gcount_padded_i, CoM_j, multi_j,
-                                     periodic, dim, e, ci->gparts, gcount_i,
+                                     periodic, dim, e, ci->grav.parts, gcount_i,
                                      cj);
       }
       if (cj_active && symmetric) {
@@ -802,21 +869,21 @@ static INLINE void runner_dopair_grav_pp(struct runner *r, struct cell *ci,
         /* First the (Newtonian) P2P */
         runner_dopair_grav_pp_full(cj_cache, ci_cache, gcount_j, gcount_i,
                                    gcount_padded_i, periodic, dim, e,
-                                   cj->gparts, ci->gparts);
+                                   cj->grav.parts, ci->grav.parts);
 
         /* Then the M2P */
         if (allow_mpole)
           runner_dopair_grav_pm_full(cj_cache, gcount_padded_j, CoM_i, multi_i,
-                                     periodic, dim, e, cj->gparts, gcount_j,
+                                     periodic, dim, e, cj->grav.parts, gcount_j,
                                      ci);
       }
     }
   }
 
   /* Write back to the particles */
-  if (ci_active) gravity_cache_write_back(ci_cache, ci->gparts, gcount_i);
+  if (ci_active) gravity_cache_write_back(ci_cache, ci->grav.parts, gcount_i);
   if (cj_active && symmetric)
-    gravity_cache_write_back(cj_cache, cj->gparts, gcount_j);
+    gravity_cache_write_back(cj_cache, cj->grav.parts, gcount_j);
 
   TIMER_TOC(timer_dopair_grav_pp);
 }
@@ -892,8 +959,21 @@ static INLINE void runner_doself_grav_pp_full(
       /* Check that particles have been drifted to the current time */
       if (gparts[pid].ti_drift != e->ti_current)
         error("gpi not drifted to current time");
-      if (pjd < gcount && gparts[pjd].ti_drift != e->ti_current)
+      if (pjd < gcount && gparts[pjd].ti_drift != e->ti_current &&
+          !gpart_is_inhibited(&gparts[pjd], e))
         error("gpj not drifted to current time");
+
+      /* Check that we are not updating an inhibited particle */
+      if (gpart_is_inhibited(&gparts[pid], e))
+        error("Updating an inhibited particle!");
+
+      /* Check that the particle we interact with was not inhibited */
+      if (pjd < gcount && gpart_is_inhibited(&gparts[pjd], e) && mass_j != 0.f)
+        error("Inhibited particle used as gravity source.");
+
+      /* Check that the particle was initialised */
+      if (gparts[pid].initialised == 0)
+        error("Adding forces to an un-initialised gpart.");
 #endif
 
       /* Interact! */
@@ -909,7 +989,8 @@ static INLINE void runner_doself_grav_pp_full(
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Update the interaction counter if it's not a padded gpart */
-      if (pjd < gcount) gparts[pid].num_interacted++;
+      if (pjd < gcount && !gpart_is_inhibited(&gparts[pjd], e))
+        gparts[pid].num_interacted++;
 #endif
     }
 
@@ -1002,8 +1083,21 @@ static INLINE void runner_doself_grav_pp_truncated(
       /* Check that particles have been drifted to the current time */
       if (gparts[pid].ti_drift != e->ti_current)
         error("gpi not drifted to current time");
-      if (pjd < gcount && gparts[pjd].ti_drift != e->ti_current)
+      if (pjd < gcount && gparts[pjd].ti_drift != e->ti_current &&
+          !gpart_is_inhibited(&gparts[pjd], e))
         error("gpj not drifted to current time");
+
+      /* Check that we are not updating an inhibited particle */
+      if (gpart_is_inhibited(&gparts[pid], e))
+        error("Updating an inhibited particle!");
+
+      /* Check that the particle we interact with was not inhibited */
+      if (pjd < gcount && gpart_is_inhibited(&gparts[pjd], e) && mass_j != 0.f)
+        error("Inhibited particle used as gravity source.");
+
+      /* Check that the particle was initialised */
+      if (gparts[pid].initialised == 0)
+        error("Adding forces to an un-initialised gpart.");
 #endif
 
       /* Interact! */
@@ -1019,7 +1113,8 @@ static INLINE void runner_doself_grav_pp_truncated(
 
 #ifdef SWIFT_DEBUG_CHECKS
       /* Update the interaction counter if it's not a padded gpart */
-      if (pjd < gcount) gparts[pid].num_interacted++;
+      if (pjd < gcount && !gpart_is_inhibited(&gparts[pjd], e))
+        gparts[pid].num_interacted++;
 #endif
     }
 
@@ -1056,7 +1151,7 @@ static INLINE void runner_doself_grav_pp(struct runner *r, struct cell *c) {
   TIMER_TIC;
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if (c->gcount == 0) error("Doing self gravity on an empty cell !");
+  if (c->grav.count == 0) error("Doing self gravity on an empty cell !");
 #endif
 
   /* Anything to do here? */
@@ -1077,7 +1172,7 @@ static INLINE void runner_doself_grav_pp(struct runner *r, struct cell *c) {
                          c->loc[2] + 0.5 * c->width[2]};
 
   /* Computed the padded counts */
-  const int gcount = c->gcount;
+  const int gcount = c->grav.count;
   const int gcount_padded = gcount - (gcount % VEC_SIZE) + VEC_SIZE;
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1087,7 +1182,7 @@ static INLINE void runner_doself_grav_pp(struct runner *r, struct cell *c) {
 #endif
 
   /* Fill the cache */
-  gravity_cache_populate_no_mpole(e->max_active_bin, ci_cache, c->gparts,
+  gravity_cache_populate_no_mpole(e->max_active_bin, ci_cache, c->grav.parts,
                                   gcount, gcount_padded, loc, c,
                                   e->gravity_properties);
 
@@ -1095,33 +1190,101 @@ static INLINE void runner_doself_grav_pp(struct runner *r, struct cell *c) {
   if (!periodic) {
 
     /* Not periodic -> Can always use Newtonian potential */
-    runner_doself_grav_pp_full(ci_cache, gcount, gcount_padded, e, c->gparts);
+    runner_doself_grav_pp_full(ci_cache, gcount, gcount_padded, e,
+                               c->grav.parts);
 
   } else {
 
     /* Get the maximal distance between any two particles */
-    const double max_r = 2. * c->multipole->r_max;
+    const double max_r = 2. * c->grav.multipole->r_max;
 
     /* Do we need to use the truncated interactions ? */
     if (max_r > min_trunc) {
 
       /* Periodic but far-away cells must use the truncated potential */
       runner_doself_grav_pp_truncated(ci_cache, gcount, gcount_padded, r_s_inv,
-                                      e, c->gparts);
+                                      e, c->grav.parts);
 
     } else {
 
       /* Periodic but close-by cells can use the full Newtonian potential */
-      runner_doself_grav_pp_full(ci_cache, gcount, gcount_padded, e, c->gparts);
+      runner_doself_grav_pp_full(ci_cache, gcount, gcount_padded, e,
+                                 c->grav.parts);
     }
   }
 
   /* Write back to the particles */
-  gravity_cache_write_back(ci_cache, c->gparts, gcount);
+  gravity_cache_write_back(ci_cache, c->grav.parts, gcount);
 
   TIMER_TOC(timer_doself_grav_pp);
 }
 
+/**
+ * @brief Computes the interaction of the field tensor and multipole
+ * of two cells symmetrically.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ */
+static INLINE void runner_dopair_grav_mm_symmetric(struct runner *r,
+                                                   struct cell *restrict ci,
+                                                   struct cell *restrict cj) {
+
+  /* Some constants */
+  const struct engine *e = r->e;
+  const struct gravity_props *props = e->gravity_properties;
+  const int periodic = e->mesh->periodic;
+  const double dim[3] = {e->mesh->dim[0], e->mesh->dim[1], e->mesh->dim[2]};
+  const float r_s_inv = e->mesh->r_s_inv;
+
+  TIMER_TIC;
+
+  /* Anything to do here? */
+  if ((!cell_is_active_gravity_mm(ci, e) || ci->nodeID != engine_rank) ||
+      (!cell_is_active_gravity_mm(cj, e) || cj->nodeID != engine_rank))
+    error("Invalid state in symmetric M-M calculation!");
+
+  /* Short-cut to the multipole */
+  const struct multipole *multi_i = &ci->grav.multipole->m_pole;
+  const struct multipole *multi_j = &cj->grav.multipole->m_pole;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ci == cj) error("Interacting a cell with itself using M2L");
+
+  if (multi_i->num_gpart == 0)
+    error("Multipole i does not seem to have been set.");
+
+  if (multi_j->num_gpart == 0)
+    error("Multipole j does not seem to have been set.");
+
+  if (ci->grav.multipole->pot.ti_init != e->ti_current)
+    error("ci->grav tensor not initialised.");
+
+  if (ci->grav.multipole->pot.ti_init != e->ti_current)
+    error("cj->grav tensor not initialised.");
+
+  if (ci->grav.ti_old_multipole != e->ti_current)
+    error(
+        "Undrifted multipole ci->grav.ti_old_multipole=%lld ci->nodeID=%d "
+        "cj->nodeID=%d e->ti_current=%lld",
+        ci->grav.ti_old_multipole, ci->nodeID, cj->nodeID, e->ti_current);
+
+  if (cj->grav.ti_old_multipole != e->ti_current)
+    error(
+        "Undrifted multipole cj->grav.ti_old_multipole=%lld cj->nodeID=%d "
+        "ci->nodeID=%d e->ti_current=%lld",
+        cj->grav.ti_old_multipole, cj->nodeID, ci->nodeID, e->ti_current);
+#endif
+
+  /* Let's interact at this level */
+  gravity_M2L_symmetric(&ci->grav.multipole->pot, &cj->grav.multipole->pot,
+                        multi_i, multi_j, ci->grav.multipole->CoM,
+                        cj->grav.multipole->CoM, props, periodic, dim, r_s_inv);
+
+  TIMER_TOC(timer_dopair_grav_mm);
+}
+
 /**
  * @brief Computes the interaction of the field tensor in a cell with the
  * multipole of another cell.
@@ -1130,9 +1293,9 @@ static INLINE void runner_doself_grav_pp(struct runner *r, struct cell *c) {
  * @param ci The #cell with field tensor to interact.
  * @param cj The #cell with the multipole.
  */
-static INLINE void runner_dopair_grav_mm(struct runner *r,
-                                         struct cell *restrict ci,
-                                         struct cell *restrict cj) {
+static INLINE void runner_dopair_grav_mm_nonsym(
+    struct runner *r, struct cell *restrict ci,
+    const struct cell *restrict cj) {
 
   /* Some constants */
   const struct engine *e = r->e;
@@ -1144,10 +1307,10 @@ static INLINE void runner_dopair_grav_mm(struct runner *r,
   TIMER_TIC;
 
   /* Anything to do here? */
-  if (!cell_is_active_gravity(ci, e) || ci->nodeID != engine_rank) return;
+  if (!cell_is_active_gravity_mm(ci, e) || ci->nodeID != engine_rank) return;
 
   /* Short-cut to the multipole */
-  const struct multipole *multi_j = &cj->multipole->m_pole;
+  const struct multipole *multi_j = &cj->grav.multipole->m_pole;
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (ci == cj) error("Interacting a cell with itself using M2L");
@@ -1155,31 +1318,97 @@ static INLINE void runner_dopair_grav_mm(struct runner *r,
   if (multi_j->num_gpart == 0)
     error("Multipole does not seem to have been set.");
 
-  if (ci->multipole->pot.ti_init != e->ti_current)
+  if (ci->grav.multipole->pot.ti_init != e->ti_current)
     error("ci->grav tensor not initialised.");
-#endif
 
-  /* Do we need to drift the multipole ? */
-  if (cj->ti_old_multipole != e->ti_current)
+  if (cj->grav.ti_old_multipole != e->ti_current)
     error(
-        "Undrifted multipole cj->ti_old_multipole=%lld cj->nodeID=%d "
+        "Undrifted multipole cj->grav.ti_old_multipole=%lld cj->nodeID=%d "
         "ci->nodeID=%d e->ti_current=%lld",
-        cj->ti_old_multipole, cj->nodeID, ci->nodeID, e->ti_current);
+        cj->grav.ti_old_multipole, cj->nodeID, ci->nodeID, e->ti_current);
+#endif
 
   /* Let's interact at this level */
-  gravity_M2L(&ci->multipole->pot, multi_j, ci->multipole->CoM,
-              cj->multipole->CoM, props, periodic, dim, r_s_inv);
+  gravity_M2L_nonsym(&ci->grav.multipole->pot, multi_j, ci->grav.multipole->CoM,
+                     cj->grav.multipole->CoM, props, periodic, dim, r_s_inv);
 
   TIMER_TOC(timer_dopair_grav_mm);
 }
 
+/**
+ * @brief Call the M-M calculation on two cells if active.
+ *
+ * @param r The #runner object.
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ */
+static INLINE void runner_dopair_grav_mm(struct runner *r,
+                                         struct cell *restrict ci,
+                                         struct cell *restrict cj) {
+
+  const struct engine *e = r->e;
+
+  /* What do we need to do? */
+  const int do_i =
+      cell_is_active_gravity_mm(ci, e) && (ci->nodeID == e->nodeID);
+  const int do_j =
+      cell_is_active_gravity_mm(cj, e) && (cj->nodeID == e->nodeID);
+
+  /* Do we need drifting first? */
+  if (ci->grav.ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
+  if (cj->grav.ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
+
+  /* Interact! */
+  if (do_i && do_j)
+    runner_dopair_grav_mm_symmetric(r, ci, cj);
+  else if (do_i)
+    runner_dopair_grav_mm_nonsym(r, ci, cj);
+  else if (do_j)
+    runner_dopair_grav_mm_nonsym(r, cj, ci);
+}
+
+/**
+ * @brief Computes all the M-M interactions between all the well-separated (at
+ * rebuild) pairs of progenies of the two cells.
+ *
+ * @param r The #runner thread.
+ * @param flags The task flag containing the list of well-separated pairs as a
+ * bit-field.
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ */
+static INLINE void runner_dopair_grav_mm_progenies(struct runner *r,
+                                                   const long long flags,
+                                                   struct cell *restrict ci,
+                                                   struct cell *restrict cj) {
+
+  /* Loop over all pairs of progenies */
+  for (int i = 0; i < 8; i++) {
+    if (ci->progeny[i] != NULL) {
+      for (int j = 0; j < 8; j++) {
+        if (cj->progeny[j] != NULL) {
+
+          struct cell *cpi = ci->progeny[i];
+          struct cell *cpj = cj->progeny[j];
+
+          const int flag = i * 8 + j;
+
+          /* Did we agree to use an M-M interaction here at the last rebuild? */
+          if (flags & (1ULL << flag)) runner_dopair_grav_mm(r, cpi, cpj);
+        }
+      }
+    }
+  }
+}
+
 static INLINE void runner_dopair_recursive_grav_pm(struct runner *r,
                                                    struct cell *ci,
                                                    const struct cell *cj) {
   /* Some constants */
   const struct engine *e = r->e;
   const int periodic = e->mesh->periodic;
-  const float dim[3] = {e->mesh->dim[0], e->mesh->dim[1], e->mesh->dim[2]};
+  const float dim[3] = {(float)e->mesh->dim[0], (float)e->mesh->dim[1],
+                        (float)e->mesh->dim[2]};
   const float r_s_inv = e->mesh->r_s_inv;
 
   /* Anything to do here? */
@@ -1187,14 +1416,14 @@ static INLINE void runner_dopair_recursive_grav_pm(struct runner *r,
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Early abort? */
-  if (ci->gcount == 0 || cj->gcount == 0)
+  if (ci->grav.count == 0 || cj->grav.count == 0)
     error("Doing pair gravity on an empty cell !");
 
   /* Sanity check */
   if (ci == cj) error("Pair interaction between a cell and itself.");
 
-  if (cj->ti_old_multipole != e->ti_current)
-    error("cj->multipole not drifted.");
+  if (cj->grav.ti_old_multipole != e->ti_current)
+    error("cj->grav.multipole not drifted.");
 #endif
 
   /* Can we recurse further? */
@@ -1215,7 +1444,7 @@ static INLINE void runner_dopair_recursive_grav_pm(struct runner *r,
     struct gravity_cache *const ci_cache = &r->ci_gravity_cache;
 
     /* Computed the padded counts */
-    const int gcount_i = ci->gcount;
+    const int gcount_i = ci->grav.count;
     const int gcount_padded_i = gcount_i - (gcount_i % VEC_SIZE) + VEC_SIZE;
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1225,32 +1454,33 @@ static INLINE void runner_dopair_recursive_grav_pm(struct runner *r,
 #endif
 
     /* Recover the multipole info and the CoM locations */
-    const struct multipole *multi_j = &cj->multipole->m_pole;
-    const float r_max = cj->multipole->r_max;
-    const float CoM_j[3] = {(float)(cj->multipole->CoM[0]),
-                            (float)(cj->multipole->CoM[1]),
-                            (float)(cj->multipole->CoM[2])};
+    const struct multipole *multi_j = &cj->grav.multipole->m_pole;
+    const float r_max = cj->grav.multipole->r_max;
+    const float CoM_j[3] = {(float)(cj->grav.multipole->CoM[0]),
+                            (float)(cj->grav.multipole->CoM[1]),
+                            (float)(cj->grav.multipole->CoM[2])};
 
     /* Fill the cache */
     gravity_cache_populate_all_mpole(
-        e->max_active_bin, periodic, dim, ci_cache, ci->gparts, gcount_i,
+        e->max_active_bin, periodic, dim, ci_cache, ci->grav.parts, gcount_i,
         gcount_padded_i, ci, CoM_j, r_max * r_max, e->gravity_properties);
 
     /* Can we use the Newtonian version or do we need the truncated one ? */
     if (!periodic) {
 
       runner_dopair_grav_pm_full(ci_cache, gcount_padded_i, CoM_j, multi_j,
-                                 periodic, dim, e, ci->gparts, gcount_i, cj);
+                                 periodic, dim, e, ci->grav.parts, gcount_i,
+                                 cj);
 
     } else {
 
       runner_dopair_grav_pm_truncated(ci_cache, gcount_padded_i, CoM_j, multi_j,
-                                      dim, r_s_inv, e, ci->gparts, gcount_i,
+                                      dim, r_s_inv, e, ci->grav.parts, gcount_i,
                                       cj);
     }
 
     /* Write back to the particles */
-    gravity_cache_write_back(ci_cache, ci->gparts, gcount_i);
+    gravity_cache_write_back(ci_cache, ci->grav.parts, gcount_i);
   }
 }
 
@@ -1288,8 +1518,8 @@ static INLINE void runner_dopair_recursive_grav(struct runner *r,
 
 #ifdef SWIFT_DEBUG_CHECKS
 
-  const int gcount_i = ci->gcount;
-  const int gcount_j = cj->gcount;
+  const int gcount_i = ci->grav.count;
+  const int gcount_j = cj->grav.count;
 
   /* Early abort? */
   if (gcount_i == 0 || gcount_j == 0)
@@ -1298,17 +1528,19 @@ static INLINE void runner_dopair_recursive_grav(struct runner *r,
   /* Sanity check */
   if (ci == cj) error("Pair interaction between a cell and itself.");
 
-  if (cell_is_active_gravity(ci, e) && ci->ti_old_multipole != e->ti_current)
-    error("ci->multipole not drifted.");
-  if (cell_is_active_gravity(cj, e) && cj->ti_old_multipole != e->ti_current)
-    error("cj->multipole not drifted.");
+  if (cell_is_active_gravity(ci, e) &&
+      ci->grav.ti_old_multipole != e->ti_current)
+    error("ci->grav.multipole not drifted.");
+  if (cell_is_active_gravity(cj, e) &&
+      cj->grav.ti_old_multipole != e->ti_current)
+    error("cj->grav.multipole not drifted.");
 #endif
 
   TIMER_TIC;
 
   /* Recover the multipole information */
-  struct gravity_tensors *const multi_i = ci->multipole;
-  struct gravity_tensors *const multi_j = cj->multipole;
+  struct gravity_tensors *const multi_i = ci->grav.multipole;
+  struct gravity_tensors *const multi_j = cj->grav.multipole;
 
   /* Get the distance between the CoMs */
   double dx = multi_i->CoM[0] - multi_j->CoM[0];
@@ -1345,9 +1577,8 @@ static INLINE void runner_dopair_recursive_grav(struct runner *r,
   /* Can we use M-M interactions ? */
   if (gravity_M2L_accept(multi_i->r_max, multi_j->r_max, theta_crit2, r2)) {
 
-    /* MATTHIEU: make a symmetric M-M interaction function ! */
+    /* Go M-M */
     runner_dopair_grav_mm(r, ci, cj);
-    runner_dopair_grav_mm(r, cj, ci);
 
   } else if (!ci->split && !cj->split) {
 
@@ -1431,7 +1662,7 @@ static INLINE void runner_doself_recursive_grav(struct runner *r,
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Early abort? */
-  if (c->gcount == 0) error("Doing self gravity on an empty cell !");
+  if (c->grav.count == 0) error("Doing self gravity on an empty cell !");
 #endif
 
   TIMER_TIC;
@@ -1467,28 +1698,6 @@ static INLINE void runner_doself_recursive_grav(struct runner *r,
   if (gettimer) TIMER_TOC(timer_dosub_self_grav);
 }
 
-/**
- * @brief Call the non-symmetric M-M calculation on two cells if active.
- *
- * @param r The #runner object.
- * @param ci The first #cell.
- * @param cj The second #cell.
- */
-static INLINE void runner_dopair_grav_mm_symmetric(struct runner *r,
-                                                   struct cell *ci,
-                                                   struct cell *cj) {
-
-  const struct engine *e = r->e;
-
-#ifdef SWIFT_DEBUG_CHECKS
-  if (!cell_is_active_gravity(ci, e) && !cell_is_active_gravity(cj, e))
-    error("Running M-M task with two inactive cells.");
-#endif
-
-  if (cell_is_active_gravity(ci, e)) runner_dopair_grav_mm(r, ci, cj);
-  if (cell_is_active_gravity(cj, e)) runner_dopair_grav_mm(r, cj, ci);
-}
-
 /**
  * @brief Performs all M-M interactions between a given top-level cell and all
  * the other top-levels that are far enough.
@@ -1505,13 +1714,14 @@ static INLINE void runner_do_grav_long_range(struct runner *r, struct cell *ci,
   const int periodic = e->mesh->periodic;
   const double dim[3] = {e->mesh->dim[0], e->mesh->dim[1], e->mesh->dim[2]};
   const double theta_crit2 = e->gravity_properties->theta_crit2;
-  const double max_distance = e->mesh->r_cut_max;
+  const double max_distance2 = e->mesh->r_cut_max * e->mesh->r_cut_max;
 
   TIMER_TIC;
 
   /* Recover the list of top-level cells */
   struct cell *cells = e->s->cells_top;
-  const int nr_cells = e->s->nr_cells;
+  int *cells_with_particles = e->s->cells_with_particles_top;
+  const int nr_cells_with_particles = e->s->nr_cells_with_particles;
 
   /* Anything to do here? */
   if (!cell_is_active_gravity(ci, e)) return;
@@ -1520,29 +1730,28 @@ static INLINE void runner_do_grav_long_range(struct runner *r, struct cell *ci,
     error("Non-local cell in long-range gravity task!");
 
   /* Check multipole has been drifted */
-  if (ci->ti_old_multipole != e->ti_current)
-    error("Interacting un-drifted multipole");
+  if (ci->grav.ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
+
+  /* Get this cell's multipole information */
+  struct gravity_tensors *const multi_i = ci->grav.multipole;
 
   /* Find this cell's top-level (great-)parent */
   struct cell *top = ci;
   while (top->parent != NULL) top = top->parent;
 
-  /* Flag that contributions will be recieved */
-  struct gravity_tensors *const multi_i = ci->multipole;
-
   /* Recover the top-level multipole (for distance checks) */
-  struct gravity_tensors *const multi_top = top->multipole;
+  struct gravity_tensors *const multi_top = top->grav.multipole;
   const double CoM_rebuild_top[3] = {multi_top->CoM_rebuild[0],
                                      multi_top->CoM_rebuild[1],
                                      multi_top->CoM_rebuild[2]};
 
   /* Loop over all the top-level cells and go for a M-M interaction if
    * well-separated */
-  for (int n = 0; n < nr_cells; ++n) {
+  for (int n = 0; n < nr_cells_with_particles; ++n) {
 
     /* Handle on the top-level cell and it's gravity business*/
-    struct cell *cj = &cells[n];
-    const struct gravity_tensors *const multi_j = cj->multipole;
+    const struct cell *cj = &cells[cells_with_particles[n]];
+    const struct gravity_tensors *const multi_j = cj->grav.multipole;
 
     /* Avoid self contributions */
     if (top == cj) continue;
@@ -1550,6 +1759,29 @@ static INLINE void runner_do_grav_long_range(struct runner *r, struct cell *ci,
     /* Skip empty cells */
     if (multi_j->m_pole.M_000 == 0.f) continue;
 
+    /* Can we escape early in the periodic BC case? */
+    if (periodic) {
+
+      /* Minimal distance between any pair of particles */
+      const double min_radius2 =
+          cell_min_dist2_same_size(top, cj, periodic, dim);
+
+      /* Are we beyond the distance where the truncated forces are 0 ?*/
+      if (min_radius2 > max_distance2) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Need to account for the interactions we missed */
+        multi_i->pot.num_interacted += multi_j->m_pole.num_gpart;
+#endif
+
+        /* Record that this multipole received a contribution */
+        multi_i->pot.interacted = 1;
+
+        /* We are done here. */
+        continue;
+      }
+    }
+
     /* Get the distance between the CoMs at the last rebuild*/
     double dx_r = CoM_rebuild_top[0] - multi_j->CoM_rebuild[0];
     double dy_r = CoM_rebuild_top[1] - multi_j->CoM_rebuild[1];
@@ -1563,30 +1795,12 @@ static INLINE void runner_do_grav_long_range(struct runner *r, struct cell *ci,
     }
     const double r2_rebuild = dx_r * dx_r + dy_r * dy_r + dz_r * dz_r;
 
-    const double max_radius =
-        sqrt(r2_rebuild) - (multi_top->r_max_rebuild + multi_j->r_max_rebuild);
-
-    /* Are we beyond the distance where the truncated forces are 0 ?*/
-    if (periodic && max_radius > max_distance) {
-
-#ifdef SWIFT_DEBUG_CHECKS
-      /* Need to account for the interactions we missed */
-      multi_i->pot.num_interacted += multi_j->m_pole.num_gpart;
-#endif
-
-      /* Record that this multipole received a contribution */
-      multi_i->pot.interacted = 1;
-
-      /* We are done here. */
-      continue;
-    }
-
     /* Are we in charge of this cell pair? */
     if (gravity_M2L_accept(multi_top->r_max_rebuild, multi_j->r_max_rebuild,
                            theta_crit2, r2_rebuild)) {
 
       /* Call the PM interaction fucntion on the active sub-cells of ci */
-      runner_dopair_grav_mm(r, ci, cj);
+      runner_dopair_grav_mm_nonsym(r, ci, cj);
       // runner_dopair_recursive_grav_pm(r, ci, cj);
 
       /* Record that this multipole received a contribution */
diff --git a/src/runner_doiact_stars.h b/src/runner_doiact_stars.h
new file mode 100644
index 0000000000000000000000000000000000000000..f208f14ac98a31a55df6741a2cc5f9cb0a829762
--- /dev/null
+++ b/src/runner_doiact_stars.h
@@ -0,0 +1,1468 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2018 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Before including this file, define FUNCTION, which is the
+   name of the interaction function. This creates the interaction functions
+   runner_dopair_FUNCTION, runner_doself_FUNCTION and runner_dosub_FUNCTION
+   calling the pairwise interaction function runner_iact_FUNCTION. */
+
+#define PASTE(x, y) x##_##y
+
+#define _DOSELF1_STARS(f) PASTE(runner_doself_stars, f)
+#define DOSELF1_STARS _DOSELF1_STARS(FUNCTION)
+
+#define _DO_NONSYM_PAIR1_STARS(f) PASTE(runner_do_nonsym_pair_stars, f)
+#define DO_NONSYM_PAIR1_STARS _DO_NONSYM_PAIR1_STARS(FUNCTION)
+
+#define _DOPAIR1_STARS(f) PASTE(runner_dopair_stars, f)
+#define DOPAIR1_STARS _DOPAIR1_STARS(FUNCTION)
+
+#define _DOPAIR1_SUBSET_STARS(f) PASTE(runner_dopair_subset_stars, f)
+#define DOPAIR1_SUBSET_STARS _DOPAIR1_SUBSET_STARS(FUNCTION)
+
+#define _DOSELF1_SUBSET_STARS(f) PASTE(runner_doself_subset_stars, f)
+#define DOSELF1_SUBSET_STARS _DOSELF1_SUBSET_STARS(FUNCTION)
+
+#define _DOSELF1_SUBSET_BRANCH_STARS(f) \
+  PASTE(runner_doself_subset_branch_stars, f)
+#define DOSELF1_SUBSET_BRANCH_STARS _DOSELF1_SUBSET_BRANCH_STARS(FUNCTION)
+
+#define _DOPAIR1_SUBSET_BRANCH_STARS(f) \
+  PASTE(runner_dopair_subset_branch_stars, f)
+#define DOPAIR1_SUBSET_BRANCH_STARS _DOPAIR1_SUBSET_BRANCH_STARS(FUNCTION)
+
+#define _DOSUB_SUBSET_STARS(f) PASTE(runner_dosub_subset_stars, f)
+#define DOSUB_SUBSET_STARS _DOSUB_SUBSET_STARS(FUNCTION)
+
+#define _DOSELF1_BRANCH_STARS(f) PASTE(runner_doself_branch_stars, f)
+#define DOSELF1_BRANCH_STARS _DOSELF1_BRANCH_STARS(FUNCTION)
+
+#define _DOPAIR1_BRANCH_STARS(f) PASTE(runner_dopair_branch_stars, f)
+#define DOPAIR1_BRANCH_STARS _DOPAIR1_BRANCH_STARS(FUNCTION)
+
+#define _DOSUB_PAIR1_STARS(f) PASTE(runner_dosub_pair_stars, f)
+#define DOSUB_PAIR1_STARS _DOSUB_PAIR1_STARS(FUNCTION)
+
+#define _DOSUB_SELF1_STARS(f) PASTE(runner_dosub_self_stars, f)
+#define DOSUB_SELF1_STARS _DOSUB_SELF1_STARS(FUNCTION)
+
+#define _IACT_STARS(f) PASTE(runner_iact_nonsym_stars, f)
+#define IACT_STARS _IACT_STARS(FUNCTION)
+
+/**
+ * @brief Calculate the number density of #part around the #spart
+ *
+ * @param r runner task
+ * @param c cell
+ * @param timer 1 if the time is to be recorded.
+ */
+void DOSELF1_STARS(struct runner *r, struct cell *c, int timer) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (c->nodeID != engine_rank) error("Should be run on a different node");
+#endif
+
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+
+  /* Anything to do here? */
+  if (!cell_is_active_stars(c, e)) return;
+  if (c->hydro.count == 0 && c->stars.count == 0) return;
+
+  /* Cosmological terms */
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  const int scount = c->stars.count;
+  const int count = c->hydro.count;
+  struct spart *restrict sparts = c->stars.parts;
+  struct part *restrict parts = c->hydro.parts;
+
+  /* Loop over the sparts in ci. */
+  for (int sid = 0; sid < scount; sid++) {
+
+    /* Get a hold of the ith spart in ci. */
+    struct spart *restrict si = &sparts[sid];
+    if (!spart_is_active(si, e) || spart_is_inhibited(si, e)) continue;
+
+    const float hi = si->h;
+    const float hig2 = hi * hi * kernel_gamma2;
+    const float six[3] = {(float)(si->x[0] - c->loc[0]),
+                          (float)(si->x[1] - c->loc[1]),
+                          (float)(si->x[2] - c->loc[2])};
+
+    /* Loop over the parts in cj. */
+    for (int pjd = 0; pjd < count; pjd++) {
+
+      /* Get a pointer to the jth particle. */
+      struct part *restrict pj = &parts[pjd];
+      const float hj = pj->h;
+
+      /* Compute the pairwise distance. */
+      const float pjx[3] = {(float)(pj->x[0] - c->loc[0]),
+                            (float)(pj->x[1] - c->loc[1]),
+                            (float)(pj->x[2] - c->loc[2])};
+      float dx[3] = {six[0] - pjx[0], six[1] - pjx[1], six[2] - pjx[2]};
+      const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check that particles have been drifted to the current time */
+      if (pj->ti_drift != e->ti_current)
+        error("Particle pj not drifted to current time");
+#endif
+
+      if (r2 > 0.f && r2 < hig2) {
+        IACT_STARS(r2, dx, hi, hj, si, pj, a, H);
+      }
+    } /* loop over the parts in ci. */
+  }   /* loop over the sparts in ci. */
+}
+
+/**
+ * @brief Calculate the number density of cj #part around the ci #spart
+ *
+ * @param r runner task
+ * @param ci The first #cell
+ * @param cj The second #cell
+ */
+void DO_NONSYM_PAIR1_STARS(struct runner *r, struct cell *restrict ci,
+                           struct cell *restrict cj) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+#ifdef UPDATE_STARS
+  if (ci->nodeID != engine_rank) error("Should be run on a different node");
+#else
+  if (cj->nodeID != engine_rank) error("Should be run on a different node");
+#endif
+#endif
+
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+
+  /* Anything to do here? */
+  if (!cell_is_active_stars(ci, e)) return;
+
+  const int scount_i = ci->stars.count;
+  const int count_j = cj->hydro.count;
+  struct spart *restrict sparts_i = ci->stars.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
+
+  /* Cosmological terms */
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Get the relative distance between the pairs, wrapping. */
+  double shift[3] = {0.0, 0.0, 0.0};
+  for (int k = 0; k < 3; k++) {
+    if (cj->loc[k] - ci->loc[k] < -e->s->dim[k] / 2)
+      shift[k] = e->s->dim[k];
+    else if (cj->loc[k] - ci->loc[k] > e->s->dim[k] / 2)
+      shift[k] = -e->s->dim[k];
+  }
+
+  /* Loop over the sparts in ci. */
+  for (int sid = 0; sid < scount_i; sid++) {
+
+    /* Get a hold of the ith spart in ci. */
+    struct spart *restrict si = &sparts_i[sid];
+    if (!spart_is_active(si, e) || spart_is_inhibited(si, e)) continue;
+    const float hi = si->h;
+    const float hig2 = hi * hi * kernel_gamma2;
+    const float six[3] = {(float)(si->x[0] - (cj->loc[0] + shift[0])),
+                          (float)(si->x[1] - (cj->loc[1] + shift[1])),
+                          (float)(si->x[2] - (cj->loc[2] + shift[2]))};
+
+    /* Loop over the parts in cj. */
+    for (int pjd = 0; pjd < count_j; pjd++) {
+
+      /* Get a pointer to the jth particle. */
+      struct part *restrict pj = &parts_j[pjd];
+      const float hj = pj->h;
+
+      /* Compute the pairwise distance. */
+      const float pjx[3] = {(float)(pj->x[0] - cj->loc[0]),
+                            (float)(pj->x[1] - cj->loc[1]),
+                            (float)(pj->x[2] - cj->loc[2])};
+      float dx[3] = {six[0] - pjx[0], six[1] - pjx[1], six[2] - pjx[2]};
+      const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check that particles have been drifted to the current time */
+      if (pj->ti_drift != e->ti_current)
+        error("Particle pj not drifted to current time");
+#endif
+
+      if (r2 < hig2) IACT_STARS(r2, dx, hi, hj, si, pj, a, H);
+
+    } /* loop over the parts in cj. */
+  }   /* loop over the parts in ci. */
+}
+
+void DOPAIR1_STARS(struct runner *r, struct cell *restrict ci,
+                   struct cell *restrict cj, int timer) {
+
+#ifdef UPDATE_STARS
+  const int ci_local = ci->nodeID == engine_rank;
+  const int cj_local = cj->nodeID == engine_rank;
+#else
+  /* here we are updating the hydro -> switch ci, cj */
+  const int ci_local = cj->nodeID == engine_rank;
+  const int cj_local = ci->nodeID == engine_rank;
+#endif
+  if (ci_local && ci->stars.count != 0 && cj->hydro.count != 0)
+    DO_NONSYM_PAIR1_STARS(r, ci, cj);
+  if (cj_local && cj->stars.count != 0 && ci->hydro.count != 0)
+    DO_NONSYM_PAIR1_STARS(r, cj, ci);
+}
+
+/**
+ * @brief Compute the interactions between a cell pair, but only for the
+ *      given indices in ci.
+ *
+ * Version using a brute-force algorithm.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param sparts_i The #part to interact with @c cj.
+ * @param ind The list of indices of particles in @c ci to interact with.
+ * @param scount The number of particles in @c ind.
+ * @param cj The second #cell.
+ * @param shift The shift vector to apply to the particles in ci.
+ */
+void DOPAIR1_SUBSET_STARS(struct runner *r, struct cell *restrict ci,
+                          struct spart *restrict sparts_i, int *restrict ind,
+                          int scount, struct cell *restrict cj,
+                          const double *shift) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ci->nodeID != engine_rank) error("Should be run on a different node");
+#endif
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+
+  const int count_j = cj->hydro.count;
+  struct part *restrict parts_j = cj->hydro.parts;
+
+  /* Cosmological terms */
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Loop over the parts_i. */
+  for (int pid = 0; pid < scount; pid++) {
+
+    /* Get a hold of the ith part in ci. */
+    struct spart *restrict spi = &sparts_i[ind[pid]];
+    double spix[3];
+    for (int k = 0; k < 3; k++) spix[k] = spi->x[k] - shift[k];
+    const float hi = spi->h;
+    const float hig2 = hi * hi * kernel_gamma2;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (!spart_is_active(spi, e))
+      error("Trying to correct smoothing length of inactive particle !");
+#endif
+
+    /* Loop over the parts in cj. */
+    for (int pjd = 0; pjd < count_j; pjd++) {
+
+      /* Get a pointer to the jth particle. */
+      struct part *restrict pj = &parts_j[pjd];
+
+      /* Compute the pairwise distance. */
+      float r2 = 0.0f;
+      float dx[3];
+      for (int k = 0; k < 3; k++) {
+        dx[k] = spix[k] - pj->x[k];
+        r2 += dx[k] * dx[k];
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check that particles have been drifted to the current time */
+      if (pj->ti_drift != e->ti_current)
+        error("Particle pj not drifted to current time");
+#endif
+      /* Hit or miss? */
+      if (r2 < hig2) {
+        IACT_STARS(r2, dx, hi, pj->h, spi, pj, a, H);
+      }
+    } /* loop over the parts in cj. */
+  }   /* loop over the parts in ci. */
+}
+
+/**
+ * @brief Compute the interactions between a cell pair, but only for the
+ *      given indices in ci.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param sparts The #spart to interact.
+ * @param ind The list of indices of particles in @c ci to interact with.
+ * @param scount The number of particles in @c ind.
+ */
+void DOSELF1_SUBSET_STARS(struct runner *r, struct cell *restrict ci,
+                          struct spart *restrict sparts, int *restrict ind,
+                          int scount) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ci->nodeID != engine_rank) error("Should be run on a different node");
+#endif
+
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+
+  /* Cosmological terms */
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  const int count_i = ci->hydro.count;
+  struct part *restrict parts_j = ci->hydro.parts;
+
+  /* Loop over the parts in ci. */
+  for (int spid = 0; spid < scount; spid++) {
+
+    /* Get a hold of the ith part in ci. */
+    struct spart *spi = &sparts[ind[spid]];
+    const float spix[3] = {(float)(spi->x[0] - ci->loc[0]),
+                           (float)(spi->x[1] - ci->loc[1]),
+                           (float)(spi->x[2] - ci->loc[2])};
+    const float hi = spi->h;
+    const float hig2 = hi * hi * kernel_gamma2;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (!spart_is_active(spi, e))
+      error("Inactive particle in subset function!");
+#endif
+
+    /* Loop over the parts in cj. */
+    for (int pjd = 0; pjd < count_i; pjd++) {
+
+      /* Get a pointer to the jth particle. */
+      struct part *restrict pj = &parts_j[pjd];
+      const float hj = pj->h;
+
+      /* Compute the pairwise distance. */
+      const float pjx[3] = {(float)(pj->x[0] - ci->loc[0]),
+                            (float)(pj->x[1] - ci->loc[1]),
+                            (float)(pj->x[2] - ci->loc[2])};
+      float dx[3] = {spix[0] - pjx[0], spix[1] - pjx[1], spix[2] - pjx[2]};
+      const float r2 = dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2];
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check that particles have been drifted to the current time */
+      if (pj->ti_drift != e->ti_current)
+        error("Particle pj not drifted to current time");
+#endif
+
+      /* Hit or miss? */
+      if (r2 > 0.f && r2 < hig2) {
+        IACT_STARS(r2, dx, hi, hj, spi, pj, a, H);
+      }
+    } /* loop over the parts in cj. */
+  }   /* loop over the parts in ci. */
+}
+
+/**
+ * @brief Determine which version of DOSELF1_SUBSET_STARS needs to be called
+ * depending on the optimisation level.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param sparts The #spart to interact.
+ * @param ind The list of indices of particles in @c ci to interact with.
+ * @param scount The number of particles in @c ind.
+ */
+void DOSELF1_SUBSET_BRANCH_STARS(struct runner *r, struct cell *restrict ci,
+                                 struct spart *restrict sparts,
+                                 int *restrict ind, int scount) {
+
+  DOSELF1_SUBSET_STARS(r, ci, sparts, ind, scount);
+}
+
+/**
+ * @brief Determine which version of DOPAIR1_SUBSET_STARS needs to be called
+ * depending on the orientation of the cells or whether DOPAIR1_SUBSET_STARS
+ * needs to be called at all.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param sparts_i The #spart to interact with @c cj.
+ * @param ind The list of indices of particles in @c ci to interact with.
+ * @param scount The number of particles in @c ind.
+ * @param cj The second #cell.
+ */
+void DOPAIR1_SUBSET_BRANCH_STARS(struct runner *r, struct cell *restrict ci,
+                                 struct spart *restrict sparts_i,
+                                 int *restrict ind, int scount,
+                                 struct cell *restrict cj) {
+
+  const struct engine *e = r->e;
+
+  /* Get the relative distance between the pairs, wrapping. */
+  double shift[3] = {0.0, 0.0, 0.0};
+  for (int k = 0; k < 3; k++) {
+    if (cj->loc[k] - ci->loc[k] < -e->s->dim[k] / 2)
+      shift[k] = e->s->dim[k];
+    else if (cj->loc[k] - ci->loc[k] > e->s->dim[k] / 2)
+      shift[k] = -e->s->dim[k];
+  }
+
+  DOPAIR1_SUBSET_STARS(r, ci, sparts_i, ind, scount, cj, shift);
+}
+
+/**
+ * @brief Compute the sub-cell interactions for the subset of #spart listed
+ * in @c ind, recursing through the cell hierarchy where possible and
+ * falling back on the BRANCH flavours at the leaves.
+ *
+ * @param r The #runner.
+ * @param ci The first #cell (contains the particles of the subset).
+ * @param sparts The #spart array in which the subset indices are taken.
+ * @param ind The list of indices of particles in @c ci to interact with.
+ * @param scount The number of particles in @c ind.
+ * @param cj The second #cell, or NULL for a self-interaction.
+ * @param sid The direction of the pair; overwritten by space_getsid() when
+ *        recursing, callers may pass -1.
+ * @param gettimer Unused in this function (kept for interface symmetry
+ *        with the other DOSUB routines).
+ */
+void DOSUB_SUBSET_STARS(struct runner *r, struct cell *ci, struct spart *sparts,
+                        int *ind, int scount, struct cell *cj, int sid,
+                        int gettimer) {
+
+  const struct engine *e = r->e;
+  struct space *s = e->s;
+
+  /* Should we even bother? */
+  if (!cell_is_active_stars(ci, e) &&
+      (cj == NULL || !cell_is_active_stars(cj, e)))
+    return;
+
+  /* Find out in which sub-cell of ci the parts are. */
+  /* NOTE(review): only ind[0] is located; this assumes the whole subset
+   * lives in the same progeny cell — confirm against the callers. */
+  struct cell *sub = NULL;
+  if (ci->split) {
+    for (int k = 0; k < 8; k++) {
+      if (ci->progeny[k] != NULL) {
+        if (&sparts[ind[0]] >= &ci->progeny[k]->stars.parts[0] &&
+            &sparts[ind[0]] <
+                &ci->progeny[k]->stars.parts[ci->progeny[k]->stars.count]) {
+          sub = ci->progeny[k];
+          break;
+        }
+      }
+    }
+  }
+
+  /* Is this a single cell? */
+  if (cj == NULL) {
+
+    /* Recurse? */
+    /* NOTE(review): this branch dereferences 'sub'; it relies on
+     * cell_can_recurse_in_self_stars_task() implying ci->split — verify. */
+    if (cell_can_recurse_in_self_stars_task(ci)) {
+
+      /* Loop over all progeny: self-interact the sub-cell holding the
+       * subset, then pair it against every other non-empty progeny. */
+      DOSUB_SUBSET_STARS(r, sub, sparts, ind, scount, NULL, -1, 0);
+      for (int j = 0; j < 8; j++)
+        if (ci->progeny[j] != sub && ci->progeny[j] != NULL)
+          DOSUB_SUBSET_STARS(r, sub, sparts, ind, scount, ci->progeny[j], -1,
+                             0);
+
+    }
+
+    /* Otherwise, compute self-interaction. */
+    else
+      DOSELF1_SUBSET_BRANCH_STARS(r, ci, sparts, ind, scount);
+  } /* self-interaction. */
+
+  /* Otherwise, it's a pair interaction. */
+  else {
+
+    /* Recurse? */
+    if (cell_can_recurse_in_pair_stars_task(ci, cj) &&
+        cell_can_recurse_in_pair_stars_task(cj, ci)) {
+
+      /* Get the type of pair if not specified explicitly. */
+      /* Note: space_getsid() may swap ci and cj, so 'sub' can end up on
+       * either side — both directions are tested in every case below. */
+      double shift[3] = {0.0, 0.0, 0.0};
+      sid = space_getsid(s, &ci, &cj, shift);
+
+      /* Different types of flags. */
+      /* For each sid only the progeny pairs that face each other across the
+       * ci/cj interface are visited, and recursion always starts from the
+       * progeny that contains the subset (== sub). */
+      switch (sid) {
+
+        /* Regular sub-cell interactions of a single cell. */
+        case 0: /* (  1 ,  1 ,  1 ) */
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 1: /* (  1 ,  1 ,  0 ) */
+          if (ci->progeny[6] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 2: /* (  1 ,  1 , -1 ) */
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          break;
+
+        case 3: /* (  1 ,  0 ,  1 ) */
+          if (ci->progeny[5] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 4: /* (  1 ,  0 ,  0 ) */
+          if (ci->progeny[4] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[4] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[4] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[4] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 5: /* (  1 ,  0 , -1 ) */
+          if (ci->progeny[4] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[4] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          break;
+
+        case 6: /* (  1 , -1 ,  1 ) */
+          if (ci->progeny[5] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          break;
+
+        case 7: /* (  1 , -1 ,  0 ) */
+          if (ci->progeny[4] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[4] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          break;
+
+        case 8: /* (  1 , -1 , -1 ) */
+          if (ci->progeny[4] == sub && cj->progeny[3] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[4], sparts, ind, scount,
+                               cj->progeny[3], -1, 0);
+          if (ci->progeny[4] != NULL && cj->progeny[3] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[3], sparts, ind, scount,
+                               ci->progeny[4], -1, 0);
+          break;
+
+        case 9: /* (  0 ,  1 ,  1 ) */
+          if (ci->progeny[3] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 10: /* (  0 ,  1 ,  0 ) */
+          if (ci->progeny[2] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[2] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[2] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[2] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+
+        case 11: /* (  0 ,  1 , -1 ) */
+          if (ci->progeny[2] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[2] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[2], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[2] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[2], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[1] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[1], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[1] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[1], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          if (ci->progeny[6] == sub && cj->progeny[5] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[6], sparts, ind, scount,
+                               cj->progeny[5], -1, 0);
+          if (ci->progeny[6] != NULL && cj->progeny[5] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[5], sparts, ind, scount,
+                               ci->progeny[6], -1, 0);
+          break;
+
+        case 12: /* (  0 ,  0 ,  1 ) */
+          if (ci->progeny[1] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[1], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[1] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[1], -1, 0);
+          if (ci->progeny[1] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[1], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[1] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[1], -1, 0);
+          if (ci->progeny[1] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[1], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[1] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[1], -1, 0);
+          if (ci->progeny[1] == sub && cj->progeny[6] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[1], sparts, ind, scount,
+                               cj->progeny[6], -1, 0);
+          if (ci->progeny[1] != NULL && cj->progeny[6] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[6], sparts, ind, scount,
+                               ci->progeny[1], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[3] == sub && cj->progeny[6] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[3], sparts, ind, scount,
+                               cj->progeny[6], -1, 0);
+          if (ci->progeny[3] != NULL && cj->progeny[6] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[6], sparts, ind, scount,
+                               ci->progeny[3], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[5] == sub && cj->progeny[6] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[5], sparts, ind, scount,
+                               cj->progeny[6], -1, 0);
+          if (ci->progeny[5] != NULL && cj->progeny[6] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[6], sparts, ind, scount,
+                               ci->progeny[5], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[0] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[0], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[0] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[0], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[2] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[2], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[2] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[2], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[4] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[4], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[4] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[4], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          if (ci->progeny[7] == sub && cj->progeny[6] != NULL)
+            DOSUB_SUBSET_STARS(r, ci->progeny[7], sparts, ind, scount,
+                               cj->progeny[6], -1, 0);
+          if (ci->progeny[7] != NULL && cj->progeny[6] == sub)
+            DOSUB_SUBSET_STARS(r, cj->progeny[6], sparts, ind, scount,
+                               ci->progeny[7], -1, 0);
+          break;
+      }
+
+    }
+
+    /* Otherwise, compute the pair directly. */
+    else if (cell_is_active_stars(ci, e)) {
+
+      /* Do any of the cells need to be drifted first? */
+      /* NOTE(review): this inner check repeats the branch condition above
+       * and is therefore always true here; likely copied from the hydro
+       * version where both cells are tested. */
+      if (cell_is_active_stars(ci, e)) {
+        if (!cell_are_spart_drifted(ci, e)) error("Cell should be drifted!");
+        if (!cell_are_part_drifted(cj, e)) error("Cell should be drifted!");
+      }
+
+      DOPAIR1_SUBSET_BRANCH_STARS(r, ci, sparts, ind, scount, cj);
+    }
+
+  } /* otherwise, pair interaction. */
+}
+
+/**
+ * @brief Determine which version of DOSELF1_STARS needs to be called depending
+ * on the optimisation level.
+ *
+ * @param r #runner
+ * @param c #cell c
+ *
+ */
+void DOSELF1_BRANCH_STARS(struct runner *r, struct cell *c) {
+
+  const struct engine *restrict e = r->e;
+
+  /* Nothing to do for an empty or inactive cell. */
+  if (c->stars.count == 0) return;
+  if (!cell_is_active_stars(c, e)) return;
+
+  /* Did we mess up the recursion? */
+  if (c->stars.h_max_old * kernel_gamma > c->dmin)
+    error("Cell smaller than smoothing length");
+
+  DOSELF1_STARS(r, c, 1);
+}
+
+#define RUNNER_CHECK_SORT(TYPE, PART, cj, ci, sid) /* debug sort check */   \
+  ({                                                                        \
+    const struct entry *restrict sort_j = cj->TYPE.sort[sid];               \
+                                                                            \
+    for (int pjd = 0; pjd < cj->TYPE.count; pjd++) {                        \
+      const struct PART *p = &cj->TYPE.parts[sort_j[pjd].i];                \
+      if (PART##_is_inhibited(p, e)) continue; /* skip removed particles */ \
+                                                                            \
+      const float d = p->x[0] * runner_shift[sid][0] +                      \
+                      p->x[1] * runner_shift[sid][1] +                      \
+                      p->x[2] * runner_shift[sid][2];                       \
+      if ((fabsf(d - sort_j[pjd].d) - cj->TYPE.dx_max_sort) >               \
+              1.0e-4 * max(fabsf(d), cj->TYPE.dx_max_sort_old) &&           \
+          (fabsf(d - sort_j[pjd].d) - cj->TYPE.dx_max_sort) >               \
+              cj->width[0] * 1.0e-10)                                       \
+        error(                                                              \
+            "particle shift diff exceeds dx_max_sort in cell cj. "          \
+            "cj->nodeID=%d "                                                \
+            "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->" #TYPE                \
+            ".dx_max_sort=%e "                                              \
+            "cj->" #TYPE                                                    \
+            ".dx_max_sort_old=%e, cellID=%i super->cellID=%i "              \
+            "cj->depth=%d cj->maxdepth=%d",                                 \
+            cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->TYPE.dx_max_sort, \
+            cj->TYPE.dx_max_sort_old, cj->cellID, cj->hydro.super->cellID,  \
+            cj->depth, cj->maxdepth);                                       \
+    }                                                                       \
+  })
+
+/**
+ * @brief Determine which version of DOPAIR1_STARS needs to be called depending
+ * on the orientation of the cells or whether DOPAIR1_STARS needs to be called
+ * at all. Performs drift and sort sanity checks before interacting.
+ *
+ * @param r #runner
+ * @param ci #cell ci
+ * @param cj #cell cj
+ *
+ */
+void DOPAIR1_BRANCH_STARS(struct runner *r, struct cell *ci, struct cell *cj) {
+
+  const struct engine *restrict e = r->e;
+
+  /* Get the sort ID. NOTE(review): space_getsid takes the cells by address
+     and may reorder ci/cj — confirm callers expect this. */
+  double shift[3] = {0.0, 0.0, 0.0};
+  const int sid = space_getsid(e->s, &ci, &cj, shift);
+
+  const int ci_active = cell_is_active_stars(ci, e);
+  const int cj_active = cell_is_active_stars(cj, e);
+#ifdef UPDATE_STARS
+  const int ci_local = ci->nodeID == engine_rank;
+  const int cj_local = cj->nodeID == engine_rank;
+#else
+  /* here we are updating the hydro -> switch ci, cj */
+  const int ci_local = cj->nodeID == engine_rank;
+  const int cj_local = ci->nodeID == engine_rank;
+#endif
+  /* do_ci: ci's stars interact with cj's gas; do_cj: the reverse. */
+  const int do_ci =
+      (ci->stars.count != 0 && cj->hydro.count != 0 && ci_active && ci_local);
+  const int do_cj =
+      (cj->stars.count != 0 && ci->hydro.count != 0 && cj_active && cj_local);
+
+  /* Anything to do here? */
+  if (!do_ci && !do_cj) return;
+
+  /* Check that cells are drifted. */
+  if (do_ci &&
+      (!cell_are_spart_drifted(ci, e) || !cell_are_part_drifted(cj, e)))
+    error("Interacting undrifted cells.");
+
+  /* Have the cells been sorted? */
+  if (do_ci && (!(ci->stars.sorted & (1 << sid)) ||
+                ci->stars.dx_max_sort_old > space_maxreldx * ci->dmin))
+    error("Interacting unsorted cells.");
+
+  if (do_ci && (!(cj->hydro.sorted & (1 << sid)) ||
+                cj->hydro.dx_max_sort_old > space_maxreldx * cj->dmin))
+    error("Interacting unsorted cells.");
+
+  if (do_cj &&
+      (!cell_are_part_drifted(ci, e) || !cell_are_spart_drifted(cj, e)))
+    error("Interacting undrifted cells.");
+
+  /* Have the cells been sorted? */
+  if (do_cj && (!(ci->hydro.sorted & (1 << sid)) ||
+                ci->hydro.dx_max_sort_old > space_maxreldx * ci->dmin))
+    error("Interacting unsorted cells.");
+
+  if (do_cj && (!(cj->stars.sorted & (1 << sid)) ||
+                cj->stars.dx_max_sort_old > space_maxreldx * cj->dmin))
+    error("Interacting unsorted cells.");
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (do_ci) {
+    RUNNER_CHECK_SORT(hydro, part, cj, ci, sid);
+    RUNNER_CHECK_SORT(stars, spart, ci, cj, sid);
+  }
+
+  if (do_cj) {
+    RUNNER_CHECK_SORT(hydro, part, ci, cj, sid);
+    RUNNER_CHECK_SORT(stars, spart, cj, ci, sid);
+  }
+#endif /* SWIFT_DEBUG_CHECKS */
+
+  DOPAIR1_STARS(r, ci, cj, 1);
+}
+
+/**
+ * @brief Compute grouped sub-cell interactions for pairs
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ * @param sid The direction linking the cells
+ * @param gettimer Do we have a timer ? (not used in this function's body)
+ *
+ * @todo Hard-code the sid on the recursive calls to avoid the
+ * redundant computations to find the sid on-the-fly.
+ */
+void DOSUB_PAIR1_STARS(struct runner *r, struct cell *ci, struct cell *cj,
+                       int sid, int gettimer) {
+
+  struct space *s = r->e->s;
+  const struct engine *e = r->e;
+
+  /* Should we even bother? */
+  const int should_do_ci = ci->stars.count != 0 && cj->hydro.count != 0 &&
+                           cell_is_active_stars(ci, e);
+  const int should_do_cj = cj->stars.count != 0 && ci->hydro.count != 0 &&
+                           cell_is_active_stars(cj, e);
+  if (!should_do_ci && !should_do_cj) return;
+
+  /* Get the type of pair. NOTE(review): this overwrites the sid argument
+     unconditionally (see @todo above). */
+  double shift[3];
+  sid = space_getsid(s, &ci, &cj, shift);
+
+  /* Recurse? Both cells must be splittable for the stars pair task. */
+  if (cell_can_recurse_in_pair_stars_task(ci, cj) &&
+      cell_can_recurse_in_pair_stars_task(cj, ci)) {
+
+    /* Different types of flags. */
+    switch (sid) {
+
+      /* Regular sub-cell interactions of a single cell. */
+      case 0: /* (  1 ,  1 ,  1 ) */
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        break;
+
+      case 1: /* (  1 ,  1 ,  0 ) */
+        if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[0], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[1], -1, 0);
+        break;
+
+      case 2: /* (  1 ,  1 , -1 ) */
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        break;
+
+      case 3: /* (  1 ,  0 ,  1 ) */
+        if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[0], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[2], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[2], -1, 0);
+        break;
+
+      case 4: /* (  1 ,  0 ,  0 ) */
+        if (ci->progeny[4] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[0], -1, 0);
+        if (ci->progeny[4] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[1], -1, 0);
+        if (ci->progeny[4] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[2], -1, 0);
+        if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[3], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[0], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[1], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[2], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[3], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[0], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[2], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[3], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[1], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[2], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[3], -1, 0);
+        break;
+
+      case 5: /* (  1 ,  0 , -1 ) */
+        if (ci->progeny[4] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[1], -1, 0);
+        if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[3], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[3], -1, 0);
+        break;
+
+      case 6: /* (  1 , -1 ,  1 ) */
+        if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[2], -1, 0);
+        break;
+
+      case 7: /* (  1 , -1 ,  0 ) */
+        if (ci->progeny[4] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[2], -1, 0);
+        if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[3], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[2], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[3], -1, 0);
+        break;
+
+      case 8: /* (  1 , -1 , -1 ) */
+        if (ci->progeny[4] != NULL && cj->progeny[3] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[4], cj->progeny[3], -1, 0);
+        break;
+
+      case 9: /* (  0 ,  1 ,  1 ) */
+        if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[0], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[4], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[4], -1, 0);
+        break;
+
+      case 10: /* (  0 ,  1 ,  0 ) */
+        if (ci->progeny[2] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[0], -1, 0);
+        if (ci->progeny[2] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[1], -1, 0);
+        if (ci->progeny[2] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[4], -1, 0);
+        if (ci->progeny[2] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[5], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[0], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[1], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[4], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[5], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[0], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[4], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[5], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[1], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[4], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[5], -1, 0);
+        break;
+
+      case 11: /* (  0 ,  1 , -1 ) */
+        if (ci->progeny[2] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[1], -1, 0);
+        if (ci->progeny[2] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[2], cj->progeny[5], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[1] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[1], -1, 0);
+        if (ci->progeny[6] != NULL && cj->progeny[5] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[6], cj->progeny[5], -1, 0);
+        break;
+
+      case 12: /* (  0 ,  0 ,  1 ) */
+        if (ci->progeny[1] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[1], cj->progeny[0], -1, 0);
+        if (ci->progeny[1] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[1], cj->progeny[2], -1, 0);
+        if (ci->progeny[1] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[1], cj->progeny[4], -1, 0);
+        if (ci->progeny[1] != NULL && cj->progeny[6] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[1], cj->progeny[6], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[0], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[2], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[4], -1, 0);
+        if (ci->progeny[3] != NULL && cj->progeny[6] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[3], cj->progeny[6], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[0], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[2], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[4], -1, 0);
+        if (ci->progeny[5] != NULL && cj->progeny[6] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[5], cj->progeny[6], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[0] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[0], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[2] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[2], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[4] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[4], -1, 0);
+        if (ci->progeny[7] != NULL && cj->progeny[6] != NULL)
+          DOSUB_PAIR1_STARS(r, ci->progeny[7], cj->progeny[6], -1, 0);
+        break;
+    }
+
+  }
+
+  /* Otherwise, compute the pair directly. */
+  else {
+
+#ifdef UPDATE_STARS
+    const int ci_local = ci->nodeID == engine_rank;
+    const int cj_local = cj->nodeID == engine_rank;
+#else
+    /* here we are updating the hydro -> switch ci, cj */
+    const int ci_local = cj->nodeID == engine_rank;
+    const int cj_local = ci->nodeID == engine_rank;
+#endif
+    /* do_ci: ci's stars interact with cj's gas; do_cj: the reverse. */
+    const int do_ci = ci->stars.count != 0 && cj->hydro.count != 0 &&
+                      cell_is_active_stars(ci, e) && ci_local;
+    const int do_cj = cj->stars.count != 0 && ci->hydro.count != 0 &&
+                      cell_is_active_stars(cj, e) && cj_local;
+
+    if (do_ci) {
+
+      /* Make sure both cells are drifted to the current timestep. */
+      if (!cell_are_spart_drifted(ci, e))
+        error("Interacting undrifted cells (sparts).");
+
+      if (!cell_are_part_drifted(cj, e))
+        error("Interacting undrifted cells (parts).");
+
+      /* Do any of the cells need to be sorted first? */
+      if (!(ci->stars.sorted & (1 << sid)) ||
+          ci->stars.dx_max_sort_old > ci->dmin * space_maxreldx) {
+        error("Interacting unsorted cell (sparts).");
+      }
+
+      if (!(cj->hydro.sorted & (1 << sid)) ||
+          cj->hydro.dx_max_sort_old > cj->dmin * space_maxreldx)
+        error("Interacting unsorted cell (parts).");
+    }
+
+    if (do_cj) {
+
+      /* Make sure both cells are drifted to the current timestep. */
+      if (!cell_are_part_drifted(ci, e))
+        error("Interacting undrifted cells (parts).");
+
+      if (!cell_are_spart_drifted(cj, e))
+        error("Interacting undrifted cells (sparts).");
+
+      /* Do any of the cells need to be sorted first? */
+      if (!(ci->hydro.sorted & (1 << sid)) ||
+          ci->hydro.dx_max_sort_old > ci->dmin * space_maxreldx) {
+        error("Interacting unsorted cell (parts).");
+      }
+
+      if (!(cj->stars.sorted & (1 << sid)) ||
+          cj->stars.dx_max_sort_old > cj->dmin * space_maxreldx) {
+        error("Interacting unsorted cell (sparts).");
+      }
+    }
+
+    if (do_ci || do_cj) DOPAIR1_BRANCH_STARS(r, ci, cj);
+  }
+}
+
+/**
+ * @brief Compute grouped sub-cell interactions for self tasks
+ *
+ * @param r The #runner.
+ * @param ci The first #cell.
+ * @param gettimer Do we have a timer ? (not used in this function's body)
+ */
+void DOSUB_SELF1_STARS(struct runner *r, struct cell *ci, int gettimer) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ci->nodeID != engine_rank)
+    error("This function should not be called on foreign cells");
+#endif
+
+  /* Should we even bother? Needs gas, stars and an active star particle. */
+  if (ci->hydro.count == 0 || ci->stars.count == 0 ||
+      !cell_is_active_stars(ci, r->e))
+    return;
+
+  /* Recurse? */
+  if (cell_can_recurse_in_self_stars_task(ci)) {
+
+    /* Loop over all progeny: self-interact each child, pair distinct ones. */
+    for (int k = 0; k < 8; k++)
+      if (ci->progeny[k] != NULL) {
+        DOSUB_SELF1_STARS(r, ci->progeny[k], 0);
+        for (int j = k + 1; j < 8; j++)
+          if (ci->progeny[j] != NULL)
+            DOSUB_PAIR1_STARS(r, ci->progeny[k], ci->progeny[j], -1, 0);
+      }
+  }
+
+  /* Otherwise, compute self-interaction. */
+  else {
+
+    /* The cell must already have been drifted to the current timestep. */
+    if (!cell_are_spart_drifted(ci, r->e)) error("Interacting undrifted cell.");
+
+    DOSELF1_BRANCH_STARS(r, ci);
+  }
+}
diff --git a/src/runner_doiact_vec.c b/src/runner_doiact_vec.c
index 2e86280d64491ee1750f41c2cd22ab01c08e30b8..182e81e99c442cf5e27405ea71321e22e7f374e3 100644
--- a/src/runner_doiact_vec.c
+++ b/src/runner_doiact_vec.c
@@ -23,9 +23,6 @@
 /* This object's header. */
 #include "runner_doiact_vec.h"
 
-/* Local headers. */
-#include "active.h"
-
 #if defined(WITH_VECTORIZATION) && defined(GADGET2_SPH)
 
 static const vector kernel_gamma2_vec = FILL_VEC(kernel_gamma2);
@@ -68,8 +65,6 @@ __attribute__((always_inline)) INLINE static void calcRemInteractions(
     vector *v_curlvzSum, vector v_hi_inv, vector v_vix, vector v_viy,
     vector v_viz, int *icount_align) {
 
-  mask_t int_mask, int_mask2;
-
   /* Work out the number of remainder interactions and pad secondary cache. */
   *icount_align = icount;
   int rem = icount % (NUM_VEC_PROC * VEC_SIZE);
@@ -78,6 +73,7 @@ __attribute__((always_inline)) INLINE static void calcRemInteractions(
     *icount_align += pad;
 
     /* Initialise masks to true. */
+    mask_t int_mask, int_mask2;
     vec_init_mask_true(int_mask);
     vec_init_mask_true(int_mask2);
 
@@ -270,10 +266,10 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
     int *max_index_i, int *max_index_j, int *init_pi, int *init_pj,
     const timebin_t max_active_bin, const int active_ci, const int active_cj) {
 
-  const struct part *restrict parts_i = ci->parts;
-  const struct part *restrict parts_j = cj->parts;
+  const struct part *restrict parts_i = ci->hydro.parts;
+  const struct part *restrict parts_j = cj->hydro.parts;
 
-  int first_pi = 0, last_pj = cj->count - 1;
+  int first_pi = 0, last_pj = cj->hydro.count - 1;
   int temp, active_id;
 
   /* Only populate max_index array for local actve cells. */
@@ -281,7 +277,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
 
     /* Find the leftmost active particle in cell i that interacts with any
      * particle in cell j. */
-    first_pi = ci->count;
+    first_pi = ci->hydro.count;
     active_id = first_pi - 1;
     while (first_pi > 0 && sort_i[first_pi - 1].d + dx_max + hi_max > dj_min) {
       first_pi--;
@@ -295,7 +291,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
 
     /* Find the maximum index into cell j for each particle in range in cell i.
      */
-    if (first_pi < ci->count) {
+    if (first_pi < ci->hydro.count) {
 
       /* Start from the first particle in cell j. */
       temp = 0;
@@ -305,33 +301,33 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
           sort_i[first_pi].d + pi->h * kernel_gamma + dx_max - rshift;
 
       /* Loop through particles in cell j until they are not in range of pi.
-       * Make sure that temp stays between 0 and cj->count - 1.*/
-      while (temp < cj->count - 1 && first_di > sort_j[temp].d) temp++;
+       * Make sure that temp stays between 0 and cj->hydro.count - 1.*/
+      while (temp < cj->hydro.count - 1 && first_di > sort_j[temp].d) temp++;
 
       max_index_i[first_pi] = temp;
 
       /* Populate max_index_i for remaining particles that are within range. */
-      for (int i = first_pi + 1; i < ci->count; i++) {
+      for (int i = first_pi + 1; i < ci->hydro.count; i++) {
         temp = max_index_i[i - 1];
         pi = &parts_i[sort_i[i].i];
 
         const float di = sort_i[i].d + pi->h * kernel_gamma + dx_max - rshift;
 
-        /* Make sure that temp stays between 0 and cj->count - 1.*/
-        while (temp < cj->count - 1 && di > sort_j[temp].d) temp++;
+        /* Make sure that temp stays between 0 and cj->hydro.count - 1.*/
+        while (temp < cj->hydro.count - 1 && di > sort_j[temp].d) temp++;
 
         max_index_i[i] = temp;
       }
     } else {
       /* Make sure that max index is set to first particle in cj.*/
-      max_index_i[ci->count - 1] = 0;
+      max_index_i[ci->hydro.count - 1] = 0;
     }
   } else {
     /* Make sure that foreign cells are only read into the cache if the local
      * cell requires it.
      * Also ensure that it does not require any particles from cj. */
-    first_pi = ci->count - 1;
-    max_index_i[ci->count - 1] = 0;
+    first_pi = ci->hydro.count - 1;
+    max_index_i[ci->hydro.count - 1] = 0;
   }
 
   /* Only populate max_index array for local actve cells. */
@@ -340,7 +336,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
      * particle in cell i. */
     last_pj = -1;
     active_id = last_pj;
-    while (last_pj < cj->count &&
+    while (last_pj < cj->hydro.count &&
            sort_j[last_pj + 1].d - hj_max - dx_max < di_max) {
       last_pj++;
       /* Store the index of the particle if it is active. */
@@ -356,7 +352,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
     if (last_pj >= 0) {
 
       /* Start from the last particle in cell i. */
-      temp = ci->count - 1;
+      temp = ci->hydro.count - 1;
 
       const struct part *pj = &parts_j[sort_j[last_pj].i];
       const float last_dj =
@@ -379,14 +375,14 @@ __attribute__((always_inline)) INLINE static void populate_max_index_density(
       }
     } else {
       /* Make sure that max index is set to last particle in ci.*/
-      max_index_j[0] = ci->count - 1;
+      max_index_j[0] = ci->hydro.count - 1;
     }
   } else {
     /* Make sure that foreign cells are only read into the cache if the local
      * cell requires it.
      * Also ensure that it does not require any particles from ci. */
     last_pj = 0;
-    max_index_j[0] = ci->count - 1;
+    max_index_j[0] = ci->hydro.count - 1;
   }
 
   *init_pi = first_pi;
@@ -430,10 +426,10 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
     int *init_pj, const timebin_t max_active_bin, const int active_ci,
     const int active_cj) {
 
-  const struct part *restrict parts_i = ci->parts;
-  const struct part *restrict parts_j = cj->parts;
+  const struct part *restrict parts_i = ci->hydro.parts;
+  const struct part *restrict parts_j = cj->hydro.parts;
 
-  int first_pi = 0, last_pj = cj->count - 1;
+  int first_pi = 0, last_pj = cj->hydro.count - 1;
   int temp, active_id;
 
   /* Only populate max_index array for local actve cells. */
@@ -441,7 +437,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
 
     /* Find the leftmost active particle in cell i that interacts with any
      * particle in cell j. */
-    first_pi = ci->count;
+    first_pi = ci->hydro.count;
     active_id = first_pi - 1;
     while (first_pi > 0 && sort_i[first_pi - 1].d + dx_max + h_max > dj_min) {
       first_pi--;
@@ -455,7 +451,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
 
     /* Find the maximum index into cell j for each particle in range in cell i.
      */
-    if (first_pi < ci->count) {
+    if (first_pi < ci->hydro.count) {
 
       /* Start from the first particle in cell j. */
       temp = 0;
@@ -466,34 +462,34 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
                              rshift;
 
       /* Loop through particles in cell j until they are not in range of pi.
-       * Make sure that temp stays between 0 and cj->count - 1.*/
-      while (temp < cj->count - 1 && first_di > sort_j[temp].d) temp++;
+       * Make sure that temp stays between 0 and cj->hydro.count - 1.*/
+      while (temp < cj->hydro.count - 1 && first_di > sort_j[temp].d) temp++;
 
       max_index_i[first_pi] = temp;
 
       /* Populate max_index_i for remaining particles that are within range. */
-      for (int i = first_pi + 1; i < ci->count; i++) {
+      for (int i = first_pi + 1; i < ci->hydro.count; i++) {
         temp = max_index_i[i - 1];
         pi = &parts_i[sort_i[i].i];
 
         const float di = sort_i[i].d + max(pi->h, hj_max_raw) * kernel_gamma +
                          dx_max - rshift;
 
-        /* Make sure that temp stays between 0 and cj->count - 1.*/
-        while (temp < cj->count - 1 && di > sort_j[temp].d) temp++;
+        /* Make sure that temp stays between 0 and cj->hydro.count - 1.*/
+        while (temp < cj->hydro.count - 1 && di > sort_j[temp].d) temp++;
 
         max_index_i[i] = temp;
       }
     } else {
       /* Make sure that max index is set to first particle in cj.*/
-      max_index_i[ci->count - 1] = 0;
+      max_index_i[ci->hydro.count - 1] = 0;
     }
   } else {
     /* Make sure that foreign cells are only read into the cache if the local
      * cell requires it.
      * Also ensure that it does not require any particles from cj. */
-    first_pi = ci->count - 1;
-    max_index_i[ci->count - 1] = 0;
+    first_pi = ci->hydro.count - 1;
+    max_index_i[ci->hydro.count - 1] = 0;
   }
 
   /* Only populate max_index array for local actve cells. */
@@ -502,7 +498,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
      * particle in cell i. */
     last_pj = -1;
     active_id = last_pj;
-    while (last_pj < cj->count &&
+    while (last_pj < cj->hydro.count &&
            sort_j[last_pj + 1].d - h_max - dx_max < di_max) {
       last_pj++;
       /* Store the index of the particle if it is active. */
@@ -518,7 +514,7 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
     if (last_pj >= 0) {
 
       /* Start from the last particle in cell i. */
-      temp = ci->count - 1;
+      temp = ci->hydro.count - 1;
 
       const struct part *pj = &parts_j[sort_j[last_pj].i];
       const float last_dj = sort_j[last_pj].d - dx_max -
@@ -543,14 +539,14 @@ __attribute__((always_inline)) INLINE static void populate_max_index_force(
       }
     } else {
       /* Make sure that max index is set to last particle in ci.*/
-      max_index_j[0] = ci->count - 1;
+      max_index_j[0] = ci->hydro.count - 1;
     }
   } else {
     /* Make sure that foreign cells are only read into the cache if the local
      * cell requires it.
      * Also ensure that it does not require any particles from ci. */
     last_pj = 0;
-    max_index_j[0] = ci->count - 1;
+    max_index_j[0] = ci->hydro.count - 1;
   }
 
   *init_pi = first_pi;
@@ -654,21 +650,21 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
 
   /* Get some local variables */
   const struct engine *e = r->e;
-  const timebin_t max_active_bin = e->max_active_bin;
-  struct part *restrict parts = c->parts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  const int count = c->hydro.count;
 
   TIMER_TIC;
 
   /* Anything to do here? */
   if (!cell_is_active_hydro(c, e)) return;
 
+  /* Check that everybody was drifted here */
   if (!cell_are_part_drifted(c, e)) error("Interacting undrifted cell.");
 
 #ifdef SWIFT_DEBUG_CHECKS
   for (int i = 0; i < count; i++) {
     /* Check that particles have been drifted to the current time */
-    if (parts[i].ti_drift != e->ti_current)
+    if (parts[i].ti_drift != e->ti_current && !part_is_inhibited(&parts[i], e))
       error("Particle pi not drifted to current time");
   }
 #endif
@@ -679,7 +675,7 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
   if (cell_cache->count < count) cache_init(cell_cache, count);
 
   /* Read the particles from the cell and store them locally in the cache. */
-  cache_read_particles(c, cell_cache);
+  const int count_align = cache_read_particles(c, cell_cache);
 
   /* Create secondary cache to store particle interactions. */
   struct c2_cache int_cache;
@@ -690,25 +686,23 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
     /* Get a pointer to the ith particle. */
     struct part *restrict pi = &parts[pid];
 
-    /* Is the ith particle active? */
-    if (!part_is_active_no_debug(pi, max_active_bin)) continue;
-
-    const float hi = cell_cache->h[pid];
+    /* Is the i^th particle active? */
+    if (!part_is_active(pi, e)) continue;
 
     /* Fill particle pi vectors. */
     const vector v_pix = vector_set1(cell_cache->x[pid]);
     const vector v_piy = vector_set1(cell_cache->y[pid]);
     const vector v_piz = vector_set1(cell_cache->z[pid]);
-    const vector v_hi = vector_set1(hi);
+    const vector v_hi = vector_set1(cell_cache->h[pid]);
     const vector v_vix = vector_set1(cell_cache->vx[pid]);
     const vector v_viy = vector_set1(cell_cache->vy[pid]);
     const vector v_viz = vector_set1(cell_cache->vz[pid]);
 
+    /* Some useful multiples of h */
+    const float hi = cell_cache->h[pid];
     const float hig2 = hi * hi * kernel_gamma2;
     const vector v_hig2 = vector_set1(hig2);
-
-    /* Get the inverse of hi. */
-    vector v_hi_inv = vec_reciprocal(v_hi);
+    const vector v_hi_inv = vec_reciprocal(v_hi);
 
     /* Reset cumulative sums of update vectors. */
     vector v_rhoSum = vector_setzero();
@@ -720,21 +714,6 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
     vector v_curlvySum = vector_setzero();
     vector v_curlvzSum = vector_setzero();
 
-    /* Pad cache if there is a serial remainder. */
-    int count_align = count;
-    const int rem = count % (NUM_VEC_PROC * VEC_SIZE);
-    if (rem != 0) {
-      count_align += (NUM_VEC_PROC * VEC_SIZE) - rem;
-
-      /* Set positions to the same as particle pi so when the r2 > 0 mask is
-       * applied these extra contributions are masked out.*/
-      for (int i = count; i < count_align; i++) {
-        cell_cache->x[i] = v_pix.f[0];
-        cell_cache->y[i] = v_piy.f[0];
-        cell_cache->z[i] = v_piz.f[0];
-      }
-    }
-
     /* The number of interactions for pi and the padded version of it to
      * make it a multiple of VEC_SIZE. */
     int icount = 0, icount_align = 0;
@@ -771,8 +750,8 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
       v_r2_2.v = vec_fma(v_dz_2.v, v_dz_2.v, v_r2_2.v);
 
       /* Form a mask from r2 < hig2 and r2 > 0.*/
-      mask_t v_doi_mask, v_doi_mask_self_check, v_doi_mask2,
-          v_doi_mask2_self_check;
+      mask_t v_doi_mask, v_doi_mask2;
+      mask_t v_doi_mask_self_check, v_doi_mask2_self_check;
 
       /* Form r2 > 0 mask and r2 < hig2 mask. */
       vec_create_mask(v_doi_mask_self_check, vec_cmp_gt(v_r2.v, vec_setzero()));
@@ -789,6 +768,25 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
       const int doi_mask2 = vec_is_mask_true(v_doi_mask2) &
                             vec_is_mask_true(v_doi_mask2_self_check);
 
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Verify that we have no inhibited particles in the interaction cache */
+      for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        if (doi_mask & (1 << bit_index)) {
+          if (parts[pjd + bit_index].time_bin >= time_bin_inhibited) {
+            error("Inhibited particle in interaction cache!");
+          }
+        }
+      }
+      for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        if (doi_mask2 & (1 << bit_index)) {
+          if (parts[pjd + VEC_SIZE + bit_index].time_bin >=
+              time_bin_inhibited) {
+            error("Inhibited particle in interaction cache2!");
+          }
+        }
+      }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
       for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
         if (doi_mask & (1 << bit_index)) {
@@ -837,7 +835,7 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
     vec_init_mask_true(int_mask);
     vec_init_mask_true(int_mask2);
 
-    /* Perform interaction with 2 vectors. */
+    /* Perform interaction with NUM_VEC_PROC vectors. */
     for (int pjd = 0; pjd < icount_align; pjd += (NUM_VEC_PROC * VEC_SIZE)) {
       runner_iact_nonsym_2_vec_density(
           &int_cache.r2q[pjd], &int_cache.dxq[pjd], &int_cache.dyq[pjd],
@@ -848,8 +846,7 @@ void runner_doself1_density_vec(struct runner *r, struct cell *restrict c) {
           &v_curlvzSum, int_mask, int_mask2, 0);
     }
 
-    /* Perform horizontal adds on vector sums and store result in particle pi.
-     */
+    /* Perform horizontal adds on vector sums and store result in pi. */
     VEC_HADD(v_rhoSum, pi->rho);
     VEC_HADD(v_rho_dhSum, pi->density.rho_dh);
     VEC_HADD(v_wcountSum, pi->density.wcount);
@@ -888,7 +885,7 @@ void runner_doself_subset_density_vec(struct runner *r, struct cell *restrict c,
 
 #if defined(WITH_VECTORIZATION) && defined(GADGET2_SPH)
 
-  const int count = c->count;
+  const int count = c->hydro.count;
 
   TIMER_TIC;
 
@@ -899,7 +896,7 @@ void runner_doself_subset_density_vec(struct runner *r, struct cell *restrict c,
   if (cell_cache->count < count) cache_init(cell_cache, count);
 
   /* Read the particles from the cell and store them locally in the cache. */
-  cache_read_particles(c, cell_cache);
+  const int count_align = cache_read_particles_subset_self(c, cell_cache);
 
   /* Create secondary cache to store particle interactions. */
   struct c2_cache int_cache;
@@ -942,23 +939,6 @@ void runner_doself_subset_density_vec(struct runner *r, struct cell *restrict c,
     vector v_curlvySum = vector_setzero();
     vector v_curlvzSum = vector_setzero();
 
-    /* Pad cache if there is a serial remainder. */
-    int count_align = count;
-    const int rem = count % (NUM_VEC_PROC * VEC_SIZE);
-    if (rem != 0) {
-      const int pad = (NUM_VEC_PROC * VEC_SIZE) - rem;
-
-      count_align += pad;
-
-      /* Set positions to the same as particle pi so when the r2 > 0 mask is
-       * applied these extra contributions are masked out.*/
-      for (int i = count; i < count_align; i++) {
-        cell_cache->x[i] = v_pix.f[0];
-        cell_cache->y[i] = v_piy.f[0];
-        cell_cache->z[i] = v_piz.f[0];
-      }
-    }
-
     /* The number of interactions for pi and the padded version of it to
      * make it a multiple of VEC_SIZE. */
     int icount = 0, icount_align = 0;
@@ -1015,9 +995,33 @@ void runner_doself_subset_density_vec(struct runner *r, struct cell *restrict c,
       const int doi_mask2 = vec_is_mask_true(v_doi_mask2) &
                             vec_is_mask_true(v_doi_mask2_self_check);
 
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Verify that we have no inhibited particles in the interaction cache */
+      for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        struct part *restrict parts_i = c->hydro.parts;
+
+        if (doi_mask & (1 << bit_index)) {
+          if (parts_i[pjd + bit_index].time_bin >= time_bin_inhibited) {
+            error("Inhibited particle in interaction cache!");
+          }
+        }
+      }
+      for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        struct part *restrict parts_i = c->hydro.parts;
+
+        if (doi_mask2 & (1 << bit_index)) {
+          if (parts_i[pjd + VEC_SIZE + bit_index].time_bin >=
+              time_bin_inhibited) {
+            error("Inhibited particle in interaction cache2!");
+          }
+        }
+      }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
-      struct part *restrict parts_i = c->parts;
       for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        struct part *restrict parts_i = c->hydro.parts;
+
         if (doi_mask & (1 << bit_index)) {
           if (pi->num_ngb_density < MAX_NUM_OF_NEIGHBOURS)
             pi->ids_ngbs_density[pi->num_ngb_density] =
@@ -1112,9 +1116,8 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
 
   const struct engine *e = r->e;
   const struct cosmology *restrict cosmo = e->cosmology;
-  const timebin_t max_active_bin = e->max_active_bin;
-  struct part *restrict parts = c->parts;
-  const int count = c->count;
+  struct part *restrict parts = c->hydro.parts;
+  const int count = c->hydro.count;
 
   TIMER_TIC;
 
@@ -1126,7 +1129,7 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
 #ifdef SWIFT_DEBUG_CHECKS
   for (int i = 0; i < count; i++) {
     /* Check that particles have been drifted to the current time */
-    if (parts[i].ti_drift != e->ti_current)
+    if (parts[i].ti_drift != e->ti_current && !part_is_inhibited(&parts[i], e))
       error("Particle pi not drifted to current time");
   }
 #endif
@@ -1138,7 +1141,7 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
   if (cell_cache->count < count) cache_init(cell_cache, count);
 
   /* Read the particles from the cell and store them locally in the cache. */
-  cache_read_force_particles(c, cell_cache);
+  const int count_align = cache_read_force_particles(c, cell_cache);
 
   /* Cosmological terms */
   const float a = cosmo->a;
@@ -1150,16 +1153,14 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
     /* Get a pointer to the ith particle. */
     struct part *restrict pi = &parts[pid];
 
-    /* Is the ith particle active? */
-    if (!part_is_active_no_debug(pi, max_active_bin)) continue;
-
-    const float hi = cell_cache->h[pid];
+    /* Is the i^th particle active? */
+    if (!part_is_active(pi, e)) continue;
 
     /* Fill particle pi vectors. */
     const vector v_pix = vector_set1(cell_cache->x[pid]);
     const vector v_piy = vector_set1(cell_cache->y[pid]);
     const vector v_piz = vector_set1(cell_cache->z[pid]);
-    const vector v_hi = vector_set1(hi);
+    const vector v_hi = vector_set1(cell_cache->h[pid]);
     const vector v_vix = vector_set1(cell_cache->vx[pid]);
     const vector v_viy = vector_set1(cell_cache->vy[pid]);
     const vector v_viz = vector_set1(cell_cache->vz[pid]);
@@ -1170,11 +1171,11 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
     const vector v_balsara_i = vector_set1(cell_cache->balsara[pid]);
     const vector v_ci = vector_set1(cell_cache->soundspeed[pid]);
 
+    /* Some useful powers of h */
+    const float hi = cell_cache->h[pid];
     const float hig2 = hi * hi * kernel_gamma2;
     const vector v_hig2 = vector_set1(hig2);
-
-    /* Get the inverse of hi. */
-    vector v_hi_inv = vec_reciprocal(v_hi);
+    const vector v_hi_inv = vec_reciprocal(v_hi);
 
     /* Reset cumulative sums of update vectors. */
     vector v_a_hydro_xSum = vector_setzero();
@@ -1184,39 +1185,18 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
     vector v_sigSum = vector_set1(pi->force.v_sig);
     vector v_entropy_dtSum = vector_setzero();
 
-    /* Pad cache if there is a serial remainder. */
-    int count_align = count;
-    int rem = count % VEC_SIZE;
-    if (rem != 0) {
-      int pad = VEC_SIZE - rem;
-
-      count_align += pad;
-
-      /* Set positions to the same as particle pi so when the r2 > 0 mask is
-       * applied these extra contributions are masked out.*/
-      for (int i = count; i < count_align; i++) {
-        cell_cache->x[i] = v_pix.f[0];
-        cell_cache->y[i] = v_piy.f[0];
-        cell_cache->z[i] = v_piz.f[0];
-        cell_cache->h[i] = 1.f;
-        cell_cache->rho[i] = 1.f;
-        cell_cache->grad_h[i] = 1.f;
-        cell_cache->pOrho2[i] = 1.f;
-        cell_cache->balsara[i] = 1.f;
-        cell_cache->soundspeed[i] = 1.f;
-      }
-    }
-
     /* Find all of particle pi's interacions and store needed values in the
      * secondary cache.*/
     for (int pjd = 0; pjd < count_align; pjd += VEC_SIZE) {
 
       /* Load 1 set of vectors from the particle cache. */
-      vector hjg2;
       const vector v_pjx = vector_load(&cell_cache->x[pjd]);
       const vector v_pjy = vector_load(&cell_cache->y[pjd]);
       const vector v_pjz = vector_load(&cell_cache->z[pjd]);
       const vector hj = vector_load(&cell_cache->h[pjd]);
+
+      /* (hj * gamma)^2 */
+      vector hjg2;
       hjg2.v = vec_mul(vec_mul(hj.v, hj.v), kernel_gamma2_vec.v);
 
       /* Compute the pairwise distance. */
@@ -1229,20 +1209,33 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
       v_r2.v = vec_fma(v_dy.v, v_dy.v, v_r2.v);
       v_r2.v = vec_fma(v_dz.v, v_dz.v, v_r2.v);
 
-      /* Form r2 > 0 mask, r2 < hig2 mask and r2 < hjg2 mask. */
-      mask_t v_doi_mask, v_doi_mask_self_check;
-
-      /* Form r2 > 0 mask.*/
+      /* Form r2 > 0 mask.
+       * This is used to avoid self-interactions */
+      mask_t v_doi_mask_self_check;
       vec_create_mask(v_doi_mask_self_check, vec_cmp_gt(v_r2.v, vec_setzero()));
 
-      /* Form a mask from r2 < hig2 mask and r2 < hjg2 mask. */
-      vector v_h2;
-      v_h2.v = vec_fmax(v_hig2.v, hjg2.v);
-      vec_create_mask(v_doi_mask, vec_cmp_lt(v_r2.v, v_h2.v));
+      /* Form a mask from r2 < hig2 mask and r2 < hjg2 mask.
+       * This is written as r2 < max(hig2, hjg2) */
+      mask_t v_doi_mask;
+      vec_create_mask(v_doi_mask,
+                      vec_cmp_lt(v_r2.v, vec_fmax(v_hig2.v, hjg2.v)));
 
-      /* Combine all 3 masks. */
+      /* Combine both masks. */
       vec_combine_masks(v_doi_mask, v_doi_mask_self_check);
 
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Verify that we have no inhibited particles in the interaction cache */
+      for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+        if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
+          if ((pjd + bit_index < count) &&
+              (parts[pjd + bit_index].time_bin >= time_bin_inhibited)) {
+            error("Inhibited particle in interaction cache! id=%lld",
+                  parts[pjd + bit_index].id);
+          }
+        }
+      }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
       for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
         if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
@@ -1255,10 +1248,14 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
 
       /* If there are any interactions perform them. */
       if (vec_is_mask_true(v_doi_mask)) {
-        vector v_hj_inv = vec_reciprocal(hj);
 
-        /* To stop floating point exceptions for when particle separations are
-         * 0. */
+        /* 1 / hj */
+        const vector v_hj_inv = vec_reciprocal(hj);
+
+        /* To stop floating point exceptions when particle separations are 0.
+         * Note that the results for r2==0 are masked out but may still raise
+         * an FPE as only the final operation is masked, not the whole math
+         * operations sequence. */
         v_r2.v = vec_add(v_r2.v, vec_set1(FLT_MIN));
 
         runner_iact_nonsym_1_vec_force(
@@ -1278,9 +1275,10 @@ void runner_doself2_force_vec(struct runner *r, struct cell *restrict c) {
     VEC_HADD(v_a_hydro_ySum, pi->a_hydro[1]);
     VEC_HADD(v_a_hydro_zSum, pi->a_hydro[2]);
     VEC_HADD(v_h_dtSum, pi->force.h_dt);
-    VEC_HMAX(v_sigSum, pi->force.v_sig);
     VEC_HADD(v_entropy_dtSum, pi->entropy_dt);
 
+    VEC_HMAX(v_sigSum, pi->force.v_sig);
+
   } /* loop over all particles. */
 
   TIMER_TOC(timer_doself_force);
@@ -1322,29 +1320,31 @@ void runner_dopair1_density_vec(struct runner *r, struct cell *ci,
   for (int k = 0; k < 3; k++) rshift += shift[k] * runner_shift[sid][k];
 
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_i = ci->sort[sid];
-  const struct entry *restrict sort_j = cj->sort[sid];
+  const struct entry *restrict sort_i = ci->hydro.sort[sid];
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
 
   /* Get some other useful values. */
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  const double hi_max = ci->h_max * kernel_gamma - rshift;
-  const double hj_max = cj->h_max * kernel_gamma;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  const double hi_max = ci->hydro.h_max * kernel_gamma - rshift;
+  const double hj_max = cj->hydro.h_max * kernel_gamma;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
   const double di_max = sort_i[count_i - 1].d - rshift;
   const double dj_min = sort_j[0].d;
-  const float dx_max = (ci->dx_max_sort + cj->dx_max_sort);
+  const float dx_max = (ci->hydro.dx_max_sort + cj->hydro.dx_max_sort);
   const int active_ci = cell_is_active_hydro(ci, e) && ci_local;
   const int active_cj = cell_is_active_hydro(cj, e) && cj_local;
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that particles have been drifted to the current time */
   for (int pid = 0; pid < count_i; pid++)
-    if (parts_i[pid].ti_drift != e->ti_current)
+    if (parts_i[pid].ti_drift != e->ti_current &&
+        !part_is_inhibited(&parts_i[pid], e))
       error("Particle pi not drifted to current time");
   for (int pjd = 0; pjd < count_j; pjd++)
-    if (parts_j[pjd].ti_drift != e->ti_current)
+    if (parts_j[pjd].ti_drift != e->ti_current &&
+        !part_is_inhibited(&parts_j[pjd], e))
       error("Particle pj not drifted to current time");
 #endif
 
@@ -1497,6 +1497,21 @@ void runner_dopair1_density_vec(struct runner *r, struct cell *ci,
         /* Form r2 < hig2 mask. */
         vec_create_mask(v_doi_mask, vec_cmp_lt(v_r2.v, v_hig2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
+            if ((pjd + bit_index < count_j) &&
+                (parts_j[sort_j[pjd + bit_index].i].time_bin >=
+                 time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_j[sort_j[pjd + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
           if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
@@ -1623,6 +1638,21 @@ void runner_dopair1_density_vec(struct runner *r, struct cell *ci,
         /* Form r2 < hig2 mask. */
         vec_create_mask(v_doj_mask, vec_cmp_lt(v_r2.v, v_hjg2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          if (vec_is_mask_true(v_doj_mask) & (1 << bit_index)) {
+            if ((ci_cache_idx + first_pi + bit_index < count_i) &&
+                (parts_i[sort_i[ci_cache_idx + first_pi + bit_index].i]
+                     .time_bin >= time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_i[sort_i[ci_cache_idx + first_pi + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
           if (vec_is_mask_true(v_doj_mask) & (1 << bit_index)) {
@@ -1693,11 +1723,11 @@ void runner_dopair_subset_density_vec(struct runner *r,
 
   TIMER_TIC;
 
-  const int count_j = cj->count;
+  const int count_j = cj->hydro.count;
 
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_j = cj->sort[sid];
-  const float dxj = cj->dx_max_sort;
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
+  const float dxj = cj->hydro.dx_max_sort;
 
   /* Get both particle caches from the runner and re-allocate
    * them if they are not big enough for the cells. */
@@ -1733,7 +1763,8 @@ void runner_dopair_subset_density_vec(struct runner *r,
         runner_shift_x, runner_shift_y, runner_shift_z, sort_j, max_index_i, 0);
 
     /* Read the particles from the cell and store them locally in the cache. */
-    cache_read_particles_subset(cj, cj_cache, sort_j, 0, &last_pj, ci->loc, 0);
+    cache_read_particles_subset_pair(cj, cj_cache, sort_j, 0, &last_pj, ci->loc,
+                                     0);
 
     const double dj_min = sort_j[0].d;
 
@@ -1805,9 +1836,27 @@ void runner_dopair_subset_density_vec(struct runner *r,
         mask_t v_doi_mask;
         vec_create_mask(v_doi_mask, vec_cmp_lt(v_r2.v, v_hig2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          struct part *restrict parts_j = cj->hydro.parts;
+
+          if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
+            if ((pjd + bit_index < count_j) &&
+                (parts_j[sort_j[pjd + bit_index].i].time_bin >=
+                 time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_j[sort_j[pjd + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
-        struct part *restrict parts_j = cj->parts;
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          struct part *restrict parts_j = cj->hydro.parts;
+
           if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
             if (pi->num_ngb_density < MAX_NUM_OF_NEIGHBOURS) {
               pi->ids_ngbs_density[pi->num_ngb_density] =
@@ -1851,7 +1900,8 @@ void runner_dopair_subset_density_vec(struct runner *r,
         runner_shift_x, runner_shift_y, runner_shift_z, sort_j, max_index_i, 1);
 
     /* Read the particles from the cell and store them locally in the cache. */
-    cache_read_particles_subset(cj, cj_cache, sort_j, &first_pj, 0, ci->loc, 1);
+    cache_read_particles_subset_pair(cj, cj_cache, sort_j, &first_pj, 0,
+                                     ci->loc, 1);
 
     /* Get the number of particles read into the ci cache. */
     const int cj_cache_count = count_j - first_pj;
@@ -1934,9 +1984,27 @@ void runner_dopair_subset_density_vec(struct runner *r,
         mask_t v_doi_mask;
         vec_create_mask(v_doi_mask, vec_cmp_lt(v_r2.v, v_hig2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          struct part *restrict parts_j = cj->hydro.parts;
+
+          if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
+            if ((cj_cache_idx + bit_index < count_j) &&
+                (parts_j[sort_j[cj_cache_idx + first_pj + bit_index].i]
+                     .time_bin >= time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_j[sort_j[cj_cache_idx + first_pj + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
-        struct part *restrict parts_j = cj->parts;
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          struct part *restrict parts_j = cj->hydro.parts;
+
           if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
             if (pi->num_ngb_density < MAX_NUM_OF_NEIGHBOURS) {
               pi->ids_ngbs_density[pi->num_ngb_density] =
@@ -2007,21 +2075,21 @@ void runner_dopair2_force_vec(struct runner *r, struct cell *ci,
   for (int k = 0; k < 3; k++) rshift += shift[k] * runner_shift[sid][k];
 
   /* Pick-out the sorted lists. */
-  const struct entry *restrict sort_i = ci->sort[sid];
-  const struct entry *restrict sort_j = cj->sort[sid];
+  const struct entry *restrict sort_i = ci->hydro.sort[sid];
+  const struct entry *restrict sort_j = cj->hydro.sort[sid];
 
   /* Get some other useful values. */
-  const int count_i = ci->count;
-  const int count_j = cj->count;
-  const double hi_max = ci->h_max * kernel_gamma;
-  const double hj_max = cj->h_max * kernel_gamma;
-  const double hi_max_raw = ci->h_max;
-  const double hj_max_raw = cj->h_max;
-  struct part *restrict parts_i = ci->parts;
-  struct part *restrict parts_j = cj->parts;
+  const int count_i = ci->hydro.count;
+  const int count_j = cj->hydro.count;
+  const double hi_max = ci->hydro.h_max * kernel_gamma;
+  const double hj_max = cj->hydro.h_max * kernel_gamma;
+  const double hi_max_raw = ci->hydro.h_max;
+  const double hj_max_raw = cj->hydro.h_max;
+  struct part *restrict parts_i = ci->hydro.parts;
+  struct part *restrict parts_j = cj->hydro.parts;
   const double di_max = sort_i[count_i - 1].d - rshift;
   const double dj_min = sort_j[0].d;
-  const float dx_max = (ci->dx_max_sort + cj->dx_max_sort);
+  const float dx_max = (ci->hydro.dx_max_sort + cj->hydro.dx_max_sort);
   const int active_ci = cell_is_active_hydro(ci, e) && ci_local;
   const int active_cj = cell_is_active_hydro(cj, e) && cj_local;
 
@@ -2032,10 +2100,12 @@ void runner_dopair2_force_vec(struct runner *r, struct cell *ci,
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that particles have been drifted to the current time */
   for (int pid = 0; pid < count_i; pid++)
-    if (parts_i[pid].ti_drift != e->ti_current)
+    if (parts_i[pid].ti_drift != e->ti_current &&
+        !part_is_inhibited(&parts_i[pid], e))
       error("Particle pi not drifted to current time");
   for (int pjd = 0; pjd < count_j; pjd++)
-    if (parts_j[pjd].ti_drift != e->ti_current)
+    if (parts_j[pjd].ti_drift != e->ti_current &&
+        !part_is_inhibited(&parts_j[pjd], e))
       error("Particle pj not drifted to current time");
 #endif
 
@@ -2200,6 +2270,21 @@ void runner_dopair2_force_vec(struct runner *r, struct cell *ci,
         v_h2.v = vec_fmax(v_hig2.v, v_hjg2.v);
         vec_create_mask(v_doi_mask, vec_cmp_lt(v_r2.v, v_h2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
+            if ((pjd + bit_index < count_j) &&
+                (parts_j[sort_j[pjd + bit_index].i].time_bin >=
+                 time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_j[sort_j[pjd + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
           if (vec_is_mask_true(v_doi_mask) & (1 << bit_index)) {
@@ -2336,6 +2421,21 @@ void runner_dopair2_force_vec(struct runner *r, struct cell *ci,
         v_h2.v = vec_fmax(v_hjg2.v, v_hig2.v);
         vec_create_mask(v_doj_mask, vec_cmp_lt(v_r2.v, v_h2.v));
 
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Verify that we have no inhibited particles in the interaction cache
+         */
+        for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
+          if (vec_is_mask_true(v_doj_mask) & (1 << bit_index)) {
+            if ((ci_cache_idx + first_pi + bit_index < count_i) &&
+                (parts_i[sort_i[ci_cache_idx + first_pi + bit_index].i]
+                     .time_bin >= time_bin_inhibited)) {
+              error("Inhibited particle in interaction cache! id=%lld",
+                    parts_i[sort_i[ci_cache_idx + first_pi + bit_index].i].id);
+            }
+          }
+        }
+#endif
+
 #ifdef DEBUG_INTERACTIONS_SPH
         for (int bit_index = 0; bit_index < VEC_SIZE; bit_index++) {
           if (vec_is_mask_true(v_doj_mask) & (1 << bit_index)) {
diff --git a/src/scheduler.c b/src/scheduler.c
index 17ea18527c31df3bd6fb1f8643a3fbee071fa55b..249c62c5b20b24f71cd7362a02dd851a81ff228c 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -29,6 +29,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/stat.h>
 
 /* MPI headers. */
 #ifdef WITH_MPI
@@ -59,12 +60,43 @@
  */
 void scheduler_clear_active(struct scheduler *s) { s->active_count = 0; }
 
+/**
+ * @brief Increase the space available for unlocks. Only call when
+ *        current index == s->size_unlock;
+ */
+static void scheduler_extend_unlocks(struct scheduler *s) {
+
+  /* Allocate the new buffer. */
+  const int size_unlocks_new = s->size_unlocks * 2;
+  struct task **unlocks_new =
+      (struct task **)malloc(sizeof(struct task *) * size_unlocks_new);
+  int *unlock_ind_new = (int *)malloc(sizeof(int) * size_unlocks_new);
+  if (unlocks_new == NULL || unlock_ind_new == NULL)
+    error("Failed to re-allocate unlocks.");
+
+  /* Wait for all writes to the old buffer to complete. */
+  while (s->completed_unlock_writes < s->size_unlocks)
+    ;
+
+  /* Copy the buffers. */
+  memcpy(unlocks_new, s->unlocks, sizeof(struct task *) * s->size_unlocks);
+  memcpy(unlock_ind_new, s->unlock_ind, sizeof(int) * s->size_unlocks);
+  free(s->unlocks);
+  free(s->unlock_ind);
+  s->unlocks = unlocks_new;
+  s->unlock_ind = unlock_ind_new;
+
+  /* Publish the new buffer size. */
+  s->size_unlocks = size_unlocks_new;
+}
+
 /**
  * @brief Add an unlock_task to the given task.
  *
  * @param s The #scheduler.
  * @param ta The unlocking #task.
  * @param tb The #task that will be unlocked.
+
  */
 void scheduler_addunlock(struct scheduler *s, struct task *ta,
                          struct task *tb) {
@@ -77,43 +109,240 @@ void scheduler_addunlock(struct scheduler *s, struct task *ta,
   const int ind = atomic_inc(&s->nr_unlocks);
 
   /* Does the buffer need to be grown? */
-  if (ind == s->size_unlocks) {
-    /* Allocate the new buffer. */
-    struct task **unlocks_new;
-    int *unlock_ind_new;
-    const int size_unlocks_new = s->size_unlocks * 2;
-    if ((unlocks_new = (struct task **)malloc(sizeof(struct task *) *
-                                              size_unlocks_new)) == NULL ||
-        (unlock_ind_new = (int *)malloc(sizeof(int) * size_unlocks_new)) ==
-            NULL)
-      error("Failed to re-allocate unlocks.");
-
-    /* Wait for all writes to the old buffer to complete. */
-    while (s->completed_unlock_writes < ind)
-      ;
-
-    /* Copy the buffers. */
-    memcpy(unlocks_new, s->unlocks, sizeof(struct task *) * ind);
-    memcpy(unlock_ind_new, s->unlock_ind, sizeof(int) * ind);
-    free(s->unlocks);
-    free(s->unlock_ind);
-    s->unlocks = unlocks_new;
-    s->unlock_ind = unlock_ind_new;
-
-    /* Publish the new buffer size. */
-    s->size_unlocks = size_unlocks_new;
-  }
+  if (ind == s->size_unlocks) scheduler_extend_unlocks(s);
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (ind > s->size_unlocks * 2)
+    message("unlocks guard enabled: %d / %d", ind, s->size_unlocks);
+#endif
 
   /* Wait for there to actually be space at my index. */
   while (ind > s->size_unlocks)
     ;
 
+  /* Guard against case when more than (old) s->size_unlocks unlocks
+   * are now pending. */
+  if (ind == s->size_unlocks) scheduler_extend_unlocks(s);
+
   /* Write the unlock to the scheduler. */
   s->unlocks[ind] = tb;
   s->unlock_ind[ind] = ta - s->tasks;
   atomic_inc(&s->completed_unlock_writes);
 }
 
+/**
+ * @brief compute the number of similar dependencies
+ *
+ * @param s The #scheduler
+ * @param ta The #task
+ * @param tb The dependent #task
+ *
+ * @return Number of dependencies
+ */
+int scheduler_get_number_relation(const struct scheduler *s,
+                                  const struct task *ta,
+                                  const struct task *tb) {
+
+  int count = 0;
+
+  /* loop over all tasks */
+  for (int i = 0; i < s->nr_tasks; i++) {
+    const struct task *ta_tmp = &s->tasks[i];
+
+    /* and their dependencies */
+    for (int j = 0; j < ta->nr_unlock_tasks; j++) {
+      const struct task *tb_tmp = ta->unlock_tasks[j];
+
+      if (ta->type == ta_tmp->type && ta->subtype == ta_tmp->subtype &&
+          tb->type == tb_tmp->type && tb->subtype == tb_tmp->subtype) {
+        count += 1;
+      }
+    }
+  }
+  return count;
+}
+
+/* Conservative number of dependencies per task type */
+#define MAX_NUMBER_DEP 128
+
+/**
+ * @brief Information about all the task dependencies of
+ *   a single task.
+ */
+struct task_dependency {
+  /* Main task */
+  /* ID of the task */
+  int type_in;
+
+  /* ID of the subtask */
+  int subtype_in;
+
+  /* Is the task implicit */
+  int implicit_in;
+
+  /* Dependent task */
+  /* ID of the dependent task */
+  int type_out[MAX_NUMBER_DEP];
+
+  /* ID of the dependent subtask */
+  int subtype_out[MAX_NUMBER_DEP];
+
+  /* Is the dependent task implicit */
+  int implicit_out[MAX_NUMBER_DEP];
+
+  /* Statistics */
+  /* number of link between the two task type */
+  int number_link[MAX_NUMBER_DEP];
+
+  /* number of ranks having this relation */
+  int number_rank[MAX_NUMBER_DEP];
+};
+
+#ifdef WITH_MPI
+
+/**
+ * @brief Define the #task_dependency for MPI
+ *
+ * @param tstype The MPI_Datatype to initialize
+ */
+void task_dependency_define(MPI_Datatype *tstype) {
+
+  /* Define the variables */
+  const int count = 8;
+  int blocklens[count];
+  MPI_Datatype types[count];
+  MPI_Aint disps[count];
+
+  /* all the type are int */
+  for (int i = 0; i < count; i++) {
+    types[i] = MPI_INT;
+  }
+
+  /* Task in */
+  disps[0] = offsetof(struct task_dependency, type_in);
+  blocklens[0] = 1;
+  disps[1] = offsetof(struct task_dependency, subtype_in);
+  blocklens[1] = 1;
+  disps[2] = offsetof(struct task_dependency, implicit_in);
+  blocklens[2] = 1;
+
+  /* Task out */
+  disps[3] = offsetof(struct task_dependency, type_out);
+  blocklens[3] = MAX_NUMBER_DEP;
+  disps[4] = offsetof(struct task_dependency, subtype_out);
+  blocklens[4] = MAX_NUMBER_DEP;
+  disps[5] = offsetof(struct task_dependency, implicit_out);
+  blocklens[5] = MAX_NUMBER_DEP;
+
+  /* statistics */
+  disps[6] = offsetof(struct task_dependency, number_link);
+  blocklens[6] = MAX_NUMBER_DEP;
+  disps[7] = offsetof(struct task_dependency, number_rank);
+  blocklens[7] = MAX_NUMBER_DEP;
+
+  /* define it for MPI */
+  MPI_Type_create_struct(count, blocklens, disps, types, tstype);
+  MPI_Type_commit(tstype);
+}
+
+/**
+ * @brief Sum operator of #task_dependency for MPI
+ *
+ * @param in_p The #task_dependency to add
+ * @param out_p The #task_dependency where in_p is added
+ * @param len The length of the arrays
+ * @param type The MPI datatype
+ */
+void task_dependency_sum(void *in_p, void *out_p, int *len,
+                         MPI_Datatype *type) {
+
+  /* change pointer type */
+  struct task_dependency *in = (struct task_dependency *)in_p;
+  struct task_dependency *out = (struct task_dependency *)out_p;
+
+  /* Loop over all the current objects */
+  for (int i = 0; i < *len; i++) {
+
+    /* loop over all the object set in invals */
+    for (int j = 0; j < MAX_NUMBER_DEP; j++) {
+
+      /* Have we reached the end of the links? */
+      if (in[i].number_link[j] == -1) {
+        break;
+      }
+
+      /* get a few variables */
+      int tb_type = in[i].type_out[j];
+      int tb_subtype = in[i].subtype_out[j];
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check tasks */
+      if (tb_type >= task_type_count) {
+        error("Unknown task type %i", tb_type);
+      }
+
+      if (tb_subtype >= task_subtype_count) {
+        error("Unknown subtask type %i", tb_subtype);
+      }
+#endif
+
+      /* find the corresponding id */
+      int k = 0;
+      while (k < MAX_NUMBER_DEP) {
+        /* have we reached the end of the links? */
+        if (out[i].number_link[k] == -1) {
+          /* reset the counter in order to be safe */
+          out[i].number_link[k] = 0;
+          out[i].number_rank[k] = 0;
+
+          /* set the relation */
+          out[i].type_in = in[i].type_in;
+          out[i].subtype_in = in[i].subtype_in;
+          out[i].implicit_in = in[i].implicit_in;
+
+          out[i].type_out[k] = in[i].type_out[j];
+          out[i].subtype_out[k] = in[i].subtype_out[j];
+          out[i].implicit_out[k] = in[i].implicit_out[j];
+          break;
+        }
+
+        /* do we have the same relation? */
+        if (out[i].type_out[k] == tb_type &&
+            out[i].subtype_out[k] == tb_subtype) {
+          break;
+        }
+
+        k++;
+      }
+
+      /* Check if we are still in the memory */
+      if (k == MAX_NUMBER_DEP) {
+        error("Not enough memory, please increase MAX_NUMBER_DEP");
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* Check if correct relation */
+      if (out[i].type_in != in[i].type_in ||
+          out[i].subtype_in != in[i].subtype_in ||
+          out[i].implicit_in != in[i].implicit_in ||
+          out[i].type_out[k] != in[i].type_out[j] ||
+          out[i].subtype_out[k] != in[i].subtype_out[j] ||
+          out[i].implicit_out[k] != in[i].implicit_out[j]) {
+        error("Tasks do not correspond");
+      }
+#endif
+
+      /* sum the contributions */
+      out[i].number_link[k] += in[i].number_link[j];
+      out[i].number_rank[k] += in[i].number_rank[j];
+    }
+  }
+
+  return;
+}
+
+#endif  // WITH_MPI
+
 /**
  * @brief Write a dot file with the task dependencies.
  *
@@ -127,196 +356,178 @@ void scheduler_write_dependencies(struct scheduler *s, int verbose) {
 
   const ticks tic = getticks();
 
-  /* Conservative number of dependencies per task type */
-  const int max_nber_dep = 128;
-
   /* Number of possible relations between tasks */
-  const int nber_relation =
-      2 * task_type_count * task_subtype_count * max_nber_dep;
+  const int nber_tasks = task_type_count * task_subtype_count;
 
-  /* To get the table of max_nber_dep for a task:
-   * ind = (ta * task_subtype_count + sa) * max_nber_dep * 2
+  /* To get the table for a task:
+   * ind = (ta * task_subtype_count + sa)
    * where ta is the value of task_type and sa is the value of
    * task_subtype  */
-  int *table = (int *)malloc(nber_relation * sizeof(int));
-  if (table == NULL)
-    error("Error allocating memory for task-dependency graph.");
-
-  /* Reset everything */
-  for (int i = 0; i < nber_relation; i++) table[i] = -1;
+  struct task_dependency *task_dep = (struct task_dependency *)malloc(
+      nber_tasks * sizeof(struct task_dependency));
 
-  /* Create file */
-  char filename[200] = "dependency_graph.dot";
-  FILE *f = fopen(filename, "w");
-  if (f == NULL) error("Error opening dependency graph file.");
+  if (task_dep == NULL)
+    error("Error allocating memory for task-dependency graph (table).");
 
-  /* Write header */
-  fprintf(f, "digraph task_dep {\n");
-  fprintf(f, "label=\"Task dependencies for SWIFT %s\";\n", git_revision());
-  fprintf(f, "\t compound=true;\n");
-  fprintf(f, "\t ratio=0.66;\n");
-  fprintf(f, "\t node[nodesep=0.15];\n");
+  /* Reset counter */
+  for (int i = 0; i < nber_tasks; i++) {
+    for (int j = 0; j < MAX_NUMBER_DEP; j++) {
+      /* Use number_link as indicator of the existence of a relation */
+      task_dep[i].number_link[j] = -1;
+    }
+  }
 
   /* loop over all tasks */
   for (int i = 0; i < s->nr_tasks; i++) {
     const struct task *ta = &s->tasks[i];
 
+    /* Current index */
+    const int ind = ta->type * task_subtype_count + ta->subtype;
+
+    struct task_dependency *cur = &task_dep[ind];
+
+    /* Set ta */
+    cur->type_in = ta->type;
+    cur->subtype_in = ta->subtype;
+    cur->implicit_in = ta->implicit;
+
     /* and their dependencies */
     for (int j = 0; j < ta->nr_unlock_tasks; j++) {
       const struct task *tb = ta->unlock_tasks[j];
 
-      /* check if dependency already written */
-      int written = 0;
-
-      /* Current index */
-      int ind = ta->type * task_subtype_count + ta->subtype;
-      ind *= 2 * max_nber_dep;
-
       int k = 0;
-      int *cur = &table[ind];
-      while (k < max_nber_dep) {
+      while (k < MAX_NUMBER_DEP) {
 
         /* not written yet */
-        if (cur[0] == -1) {
-          cur[0] = tb->type;
-          cur[1] = tb->subtype;
+        if (cur->number_link[k] == -1) {
+          /* set tb */
+          cur->type_out[k] = tb->type;
+          cur->subtype_out[k] = tb->subtype;
+          cur->implicit_out[k] = tb->implicit;
+
+          /* statistics */
+          const int count = scheduler_get_number_relation(s, ta, tb);
+          cur->number_link[k] = count;
+          cur->number_rank[k] = 1;
+
           break;
         }
 
         /* already written */
-        if (cur[0] == tb->type && cur[1] == tb->subtype) {
-          written = 1;
+        if (cur->type_out[k] == tb->type &&
+            cur->subtype_out[k] == tb->subtype) {
           break;
         }
 
         k += 1;
-        cur = &cur[3];
       }
 
-      /* max_nber_dep is too small */
-      if (k == max_nber_dep)
-        error("Not enough memory, please increase max_nber_dep");
+      /* MAX_NUMBER_DEP is too small */
+      if (k == MAX_NUMBER_DEP)
+        error("Not enough memory, please increase MAX_NUMBER_DEP");
+    }
+  }
 
-      /* Not written yet => write it */
-      if (!written) {
+#ifdef WITH_MPI
+  /* create MPI operator */
+  MPI_Datatype data_type;
+  task_dependency_define(&data_type);
 
-        /* text to write */
-        char ta_name[200];
-        char tb_name[200];
+  MPI_Op sum;
+  MPI_Op_create(task_dependency_sum, /* commute */ 1, &sum);
 
-        /* construct line */
-        if (ta->subtype == task_subtype_none)
-          sprintf(ta_name, "%s", taskID_names[ta->type]);
-        else
-          sprintf(ta_name, "\"%s %s\"", taskID_names[ta->type],
-                  subtaskID_names[ta->subtype]);
+  /* create recv buffer */
+  struct task_dependency *recv = NULL;
 
-        if (tb->subtype == task_subtype_none)
-          sprintf(tb_name, "%s", taskID_names[tb->type]);
-        else
-          sprintf(tb_name, "\"%s %s\"", taskID_names[tb->type],
-                  subtaskID_names[tb->subtype]);
-
-        /* Write to the ffile */
-        fprintf(f, "\t %s->%s;\n", ta_name, tb_name);
-
-        /* Change colour of implicit tasks */
-        if (ta->implicit)
-          fprintf(f, "\t %s [style = filled];\n\t %s [color = lightgrey];\n",
-                  ta_name, ta_name);
-        if (tb->implicit)
-          fprintf(f, "\t %s [style = filled];\n\t %s [color = lightgrey];\n",
-                  tb_name, tb_name);
-
-        /* Change shape of MPI communications */
-        if (ta->type == task_type_send || ta->type == task_type_recv)
-          fprintf(f, "\t \"%s %s\" [shape = diamond];\n",
-                  taskID_names[ta->type], subtaskID_names[ta->subtype]);
-        if (tb->type == task_type_send || tb->type == task_type_recv)
-          fprintf(f, "\t \"%s %s\" [shape = diamond];\n",
-                  taskID_names[tb->type], subtaskID_names[tb->subtype]);
+  if (s->nodeID == 0) {
+    recv = (struct task_dependency *)malloc(nber_tasks *
+                                            sizeof(struct task_dependency));
+
+    /* reset counter */
+    for (int i = 0; i < nber_tasks; i++) {
+      for (int j = 0; j < MAX_NUMBER_DEP; j++) {
+        /* Use number_link as indicator of the existence of a relation */
+        recv[i].number_link[j] = -1;
       }
     }
   }
 
-  int density_cluster[4] = {0};
-  int gradient_cluster[4] = {0};
-  int force_cluster[4] = {0};
-  int gravity_cluster[5] = {0};
+  /* Do the reduction */
+  int test =
+      MPI_Reduce(task_dep, recv, nber_tasks, data_type, sum, 0, MPI_COMM_WORLD);
+  if (test != MPI_SUCCESS) error("MPI reduce failed");
 
-  /* Check whether we need to construct a group of tasks */
-  for (int type = 0; type < task_type_count; ++type) {
+  /* free some memory */
+  if (s->nodeID == 0) {
+    free(task_dep);
+    task_dep = recv;
+  }
+#endif
 
-    for (int subtype = 0; subtype < task_subtype_count; ++subtype) {
+  if (s->nodeID == 0) {
+    /* Create file */
+    char *filename = "dependency_graph.csv";
+    FILE *f = fopen(filename, "w");
+    if (f == NULL) error("Error opening dependency graph file.");
+
+    /* Write header */
+    fprintf(f, "# %s\n", git_revision());
+    fprintf(
+        f,
+        "task_in,task_out,implicit_in,implicit_out,mpi_in,mpi_out,cluster_in,"
+        "cluster_out,number_link,number_rank\n");
+
+    for (int i = 0; i < nber_tasks; i++) {
+      for (int j = 0; j < MAX_NUMBER_DEP; j++) {
+        /* Does this link exist? */
+        if (task_dep[i].number_link[j] == -1) {
+          continue;
+        }
 
-      const int ind = 2 * (type * task_subtype_count + subtype) * max_nber_dep;
+        /* Define a few variables */
+        const int ta_type = task_dep[i].type_in;
+        const int ta_subtype = task_dep[i].subtype_in;
+        const int ta_implicit = task_dep[i].implicit_in;
 
-      /* Does this task/sub-task exist? */
-      if (table[ind] != -1) {
+        const int tb_type = task_dep[i].type_out[j];
+        const int tb_subtype = task_dep[i].subtype_out[j];
+        const int tb_implicit = task_dep[i].implicit_out[j];
 
-        for (int k = 0; k < 4; ++k) {
-          if (type == task_type_self + k && subtype == task_subtype_density)
-            density_cluster[k] = 1;
-          if (type == task_type_self + k && subtype == task_subtype_gradient)
-            gradient_cluster[k] = 1;
-          if (type == task_type_self + k && subtype == task_subtype_force)
-            force_cluster[k] = 1;
-          if (type == task_type_self + k && subtype == task_subtype_grav)
-            gravity_cluster[k] = 1;
-        }
-        if (type == task_type_grav_mesh) gravity_cluster[2] = 1;
-        if (type == task_type_grav_long_range) gravity_cluster[3] = 1;
-        if (type == task_type_grav_mm) gravity_cluster[4] = 1;
+        const int count = task_dep[i].number_link[j];
+        const int number_rank = task_dep[i].number_rank[j];
+
+        /* text to write */
+        char ta_name[200];
+        char tb_name[200];
+
+        /* construct line */
+        task_get_full_name(ta_type, ta_subtype, ta_name);
+        task_get_full_name(tb_type, tb_subtype, tb_name);
+
+        /* Check if MPI */
+        int ta_mpi = 0;
+        if (ta_type == task_type_send || ta_type == task_type_recv) ta_mpi = 1;
+
+        int tb_mpi = 0;
+        if (tb_type == task_type_send || tb_type == task_type_recv) tb_mpi = 1;
+
+        /* Get group name */
+        char ta_cluster[20];
+        char tb_cluster[20];
+        task_get_group_name(ta_type, ta_subtype, ta_cluster);
+        task_get_group_name(tb_type, tb_subtype, tb_cluster);
+
+        fprintf(f, "%s,%s,%d,%d,%d,%d,%s,%s,%d,%d\n", ta_name, tb_name,
+                ta_implicit, tb_implicit, ta_mpi, tb_mpi, ta_cluster,
+                tb_cluster, count, number_rank);
       }
     }
+    /* Close the file */
+    fclose(f);
   }
 
-  /* Make a cluster for the density tasks */
-  fprintf(f, "\t subgraph cluster0{\n");
-  fprintf(f, "\t\t label=\"\";\n");
-  for (int k = 0; k < 4; ++k)
-    if (density_cluster[k])
-      fprintf(f, "\t\t \"%s %s\";\n", taskID_names[task_type_self + k],
-              subtaskID_names[task_subtype_density]);
-  fprintf(f, "\t};\n");
-
-  /* Make a cluster for the force tasks */
-  fprintf(f, "\t subgraph cluster1{\n");
-  fprintf(f, "\t\t label=\"\";\n");
-  for (int k = 0; k < 4; ++k)
-    if (force_cluster[k])
-      fprintf(f, "\t\t \"%s %s\";\n", taskID_names[task_type_self + k],
-              subtaskID_names[task_subtype_force]);
-  fprintf(f, "\t};\n");
-
-  /* Make a cluster for the gradient tasks */
-  fprintf(f, "\t subgraph cluster2{\n");
-  fprintf(f, "\t\t label=\"\";\n");
-  for (int k = 0; k < 4; ++k)
-    if (gradient_cluster[k])
-      fprintf(f, "\t\t \"%s %s\";\n", taskID_names[task_type_self + k],
-              subtaskID_names[task_subtype_gradient]);
-  fprintf(f, "\t};\n");
-
-  /* Make a cluster for the gravity tasks */
-  fprintf(f, "\t subgraph cluster3{\n");
-  fprintf(f, "\t\t label=\"\";\n");
-  for (int k = 0; k < 2; ++k)
-    if (gravity_cluster[k])
-      fprintf(f, "\t\t \"%s %s\";\n", taskID_names[task_type_self + k],
-              subtaskID_names[task_subtype_grav]);
-  if (gravity_cluster[2])
-    fprintf(f, "\t\t %s;\n", taskID_names[task_type_grav_mesh]);
-  if (gravity_cluster[3])
-    fprintf(f, "\t\t %s;\n", taskID_names[task_type_grav_long_range]);
-  if (gravity_cluster[4])
-    fprintf(f, "\t\t %s;\n", taskID_names[task_type_grav_mm]);
-  fprintf(f, "\t};\n");
-
   /* Be clean */
-  fprintf(f, "}");
-  fclose(f);
-  free(table);
+  free(task_dep);
 
   if (verbose)
     message("Printing task graph took %.3f %s.",
@@ -331,6 +542,11 @@ void scheduler_write_dependencies(struct scheduler *s, int verbose) {
  */
 static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
 
+  /* Are we considering both stars and hydro when splitting? */
+  /* Note this is not very clean as the scheduler should not really
+     access the engine... */
+  const int with_feedback = (s->space->e->policy & engine_policy_feedback);
+
   /* Iterate on this task until we're done with it. */
   int redo = 1;
   while (redo) {
@@ -338,9 +554,20 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
     /* Reset the redo flag. */
     redo = 0;
 
-    /* Non-splittable task? */
-    if ((t->ci == NULL) || (t->type == task_type_pair && t->cj == NULL) ||
-        t->ci->count == 0 || (t->cj != NULL && t->cj->count == 0)) {
+    /* Is this a non-empty self-task? */
+    const int is_self =
+        (t->type == task_type_self) && (t->ci != NULL) &&
+        ((t->ci->hydro.count > 0) || (with_feedback && t->ci->stars.count > 0));
+
+    /* Is this a non-empty pair-task? */
+    const int is_pair =
+        (t->type == task_type_pair) && (t->ci != NULL) && (t->cj != NULL) &&
+        ((t->ci->hydro.count > 0) ||
+         (with_feedback && t->ci->stars.count > 0)) &&
+        ((t->cj->hydro.count > 0) || (with_feedback && t->cj->stars.count > 0));
+
+    /* Empty task? */
+    if (!is_self && !is_pair) {
       t->type = task_type_none;
       t->subtype = task_subtype_none;
       t->cj = NULL;
@@ -364,7 +591,7 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
       if (cell_can_split_self_hydro_task(ci)) {
 
         /* Make a sub? */
-        if (scheduler_dosub && ci->count < space_subsize_self_hydro) {
+        if (scheduler_dosub && ci->hydro.count < space_subsize_self_hydro) {
 
           /* convert to a self-subtask. */
           t->type = task_type_sub_self;
@@ -379,24 +606,46 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
           int first_child = 0;
           while (ci->progeny[first_child] == NULL) first_child++;
           t->ci = ci->progeny[first_child];
-          for (int k = first_child + 1; k < 8; k++)
-            if (ci->progeny[k] != NULL && ci->progeny[k]->count)
+          for (int k = first_child + 1; k < 8; k++) {
+
+            /* Do we have a non-empty progenitor? */
+            if (ci->progeny[k] != NULL &&
+                (ci->progeny[k]->hydro.count ||
+                 (with_feedback && ci->progeny[k]->stars.count))) {
+
               scheduler_splittask_hydro(
                   scheduler_addtask(s, task_type_self, t->subtype, 0, 0,
                                     ci->progeny[k], NULL),
                   s);
+            }
+          }
 
           /* Make a task for each pair of progeny */
-          for (int j = 0; j < 8; j++)
-            if (ci->progeny[j] != NULL && ci->progeny[j]->count)
-              for (int k = j + 1; k < 8; k++)
-                if (ci->progeny[k] != NULL && ci->progeny[k]->count)
+          for (int j = 0; j < 8; j++) {
+
+            /* Do we have a non-empty progenitor? */
+            if (ci->progeny[j] != NULL &&
+                (ci->progeny[j]->hydro.count ||
+                 (with_feedback && ci->progeny[j]->stars.count))) {
+
+              for (int k = j + 1; k < 8; k++) {
+
+                /* Do we have a second non-empty progenitor? */
+                if (ci->progeny[k] != NULL &&
+                    (ci->progeny[k]->hydro.count ||
+                     (with_feedback && ci->progeny[k]->stars.count))) {
+
                   scheduler_splittask_hydro(
                       scheduler_addtask(s, task_type_pair, t->subtype,
                                         sub_sid_flag[j][k], 0, ci->progeny[j],
                                         ci->progeny[k]),
                       s);
+                }
+              }
+            }
+          }
         }
+
       } /* Cell is split */
 
     } /* Self interaction */
@@ -419,13 +668,20 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
       double shift[3];
       const int sid = space_getsid(s->space, &ci, &cj, shift);
 
+#ifdef SWIFT_DEBUG_CHECKS
+      if (sid != t->flags)
+        error("Got pair task with incorrect flags: sid=%d flags=%lld", sid,
+              t->flags);
+#endif
+
       /* Should this task be split-up? */
       if (cell_can_split_pair_hydro_task(ci) &&
           cell_can_split_pair_hydro_task(cj)) {
 
         /* Replace by a single sub-task? */
         if (scheduler_dosub && /* Use division to avoid integer overflow. */
-            ci->count * sid_scale[sid] < space_subsize_pair_hydro / cj->count &&
+            ci->hydro.count * sid_scale[sid] <
+                space_subsize_pair_hydro / cj->hydro.count &&
             !sort_is_corner(sid)) {
 
           /* Make this task a sub task. */
@@ -774,18 +1030,18 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
 
         /* Otherwise, break it up if it is too large? */
       } else if (scheduler_doforcesplit && ci->split && cj->split &&
-                 (ci->count > space_maxsize / cj->count)) {
+                 (ci->hydro.count > space_maxsize / cj->hydro.count)) {
 
-        // message( "force splitting pair with %i and %i parts." , ci->count ,
-        // cj->count );
+        // message( "force splitting pair with %i and %i parts." ,
+        // ci->hydro.count , cj->hydro.count );
 
         /* Replace the current task. */
         t->type = task_type_none;
 
         for (int j = 0; j < 8; j++)
-          if (ci->progeny[j] != NULL && ci->progeny[j]->count)
+          if (ci->progeny[j] != NULL && ci->progeny[j]->hydro.count)
             for (int k = 0; k < 8; k++)
-              if (cj->progeny[k] != NULL && cj->progeny[k]->count) {
+              if (cj->progeny[k] != NULL && cj->progeny[k]->hydro.count) {
                 struct task *tl =
                     scheduler_addtask(s, task_type_pair, t->subtype, 0, 0,
                                       ci->progeny[j], cj->progeny[k]);
@@ -805,11 +1061,8 @@ static void scheduler_splittask_hydro(struct task *t, struct scheduler *s) {
  */
 static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
 
-/* Temporarily prevent MPI here */
-#ifndef WITH_MPI
   const struct space *sp = s->space;
   struct engine *e = sp->e;
-#endif
 
   /* Iterate on this task until we're done with it. */
   int redo = 1;
@@ -839,13 +1092,10 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
         break;
       }
 
-/* Temporarily prevent MPI here */
-#ifndef WITH_MPI
-
       /* Should we split this task? */
       if (cell_can_split_self_gravity_task(ci)) {
 
-        if (scheduler_dosub && ci->gcount < space_subsize_self_grav) {
+        if (scheduler_dosub && ci->grav.count < space_subsize_self_grav) {
 
           /* Otherwise, split it. */
         } else {
@@ -880,7 +1130,6 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
           } /* Self-gravity only */
         }   /* Make tasks explicitly */
       }     /* Cell is split */
-#endif      /* WITH_MPI */
     }       /* Self interaction */
 
     /* Pair interaction? */
@@ -896,68 +1145,70 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
         break;
       }
 
-/* Temporarily prevent MPI here */
-#ifndef WITH_MPI
-
-      /* Should we replace it with an M-M task? */
-      if (cell_can_use_pair_mm(ci, cj, e, sp)) {
-
-        t->type = task_type_grav_mm;
-        t->subtype = task_subtype_none;
-
-        /* Since this task will not be split, we can already link it */
-        atomic_inc(&ci->nr_tasks);
-        atomic_inc(&cj->nr_tasks);
-        engine_addlink(e, &ci->grav, t);
-        engine_addlink(e, &cj->grav, t);
-        break;
-      }
-
       /* Should this task be split-up? */
       if (cell_can_split_pair_gravity_task(ci) &&
           cell_can_split_pair_gravity_task(cj)) {
 
+        const long long gcount_i = ci->grav.count;
+        const long long gcount_j = cj->grav.count;
+
         /* Replace by a single sub-task? */
-        if (scheduler_dosub && /* Use division to avoid integer overflow. */
-            ci->gcount < space_subsize_pair_grav / cj->gcount) {
+        if (scheduler_dosub &&
+            gcount_i * gcount_j < ((long long)space_subsize_pair_grav)) {
 
           /* Otherwise, split it. */
         } else {
 
-          /* Take a step back (we're going to recycle the current task)... */
-          redo = 1;
-
-          /* Find the first non-empty childrens of the cells */
-          int first_ci_child = 0, first_cj_child = 0;
-          while (ci->progeny[first_ci_child] == NULL) first_ci_child++;
-          while (cj->progeny[first_cj_child] == NULL) first_cj_child++;
-
-          /* Recycle the current pair */
-          t->ci = ci->progeny[first_ci_child];
-          t->cj = cj->progeny[first_cj_child];
+          /* Turn the task into a M-M task that will take care of all the
+           * progeny pairs */
+          t->type = task_type_grav_mm;
+          t->subtype = task_subtype_none;
+          t->flags = 0;
 
           /* Make a task for every other pair of progeny */
-          for (int i = first_ci_child; i < 8; i++) {
+          for (int i = 0; i < 8; i++) {
             if (ci->progeny[i] != NULL) {
-              for (int j = first_cj_child; j < 8; j++) {
+              for (int j = 0; j < 8; j++) {
                 if (cj->progeny[j] != NULL) {
 
-                  /* Skip the recycled pair */
-                  if (i == first_ci_child && j == first_cj_child) continue;
+                  /* Can we use a M-M interaction here? */
+                  if (cell_can_use_pair_mm_rebuild(ci->progeny[i],
+                                                   cj->progeny[j], e, sp)) {
 
-                  scheduler_splittask_gravity(
-                      scheduler_addtask(s, task_type_pair, t->subtype, 0, 0,
-                                        ci->progeny[i], cj->progeny[j]),
-                      s);
+                    /* Flag this pair as being treated by the M-M task.
+                     * We use the 64 bits in the task->flags field to store
+                     * this information. The corresponding task will unpack
+                     * the information and operate according to the choices
+                     * made here. */
+                    const int flag = i * 8 + j;
+                    t->flags |= (1ULL << flag);
+
+                  } else {
+
+                    /* Ok, we actually have to create a task */
+                    scheduler_splittask_gravity(
+                        scheduler_addtask(s, task_type_pair, task_subtype_grav,
+                                          0, 0, ci->progeny[i], cj->progeny[j]),
+                        s);
+                  }
                 }
               }
             }
           }
+
+          /* Can none of the progenies use M-M calculations? */
+          if (t->flags == 0) {
+            t->type = task_type_none;
+            t->subtype = task_subtype_none;
+            t->ci = NULL;
+            t->cj = NULL;
+            t->skip = 1;
+          }
+
         } /* Split the pair */
       }
-#endif /* WITH_MPI */
-    }  /* pair interaction? */
-  }    /* iterate over the current task. */
+    } /* pair interaction? */
+  }   /* iterate over the current task. */
 }
 
 /**
@@ -1049,9 +1300,9 @@ struct task *scheduler_addtask(struct scheduler *s, enum task_types type,
   t->nr_unlock_tasks = 0;
 #ifdef SWIFT_DEBUG_TASKS
   t->rid = -1;
+#endif
   t->tic = 0;
   t->toc = 0;
-#endif
 
   /* Add an index for it. */
   // lock_lock( &s->lock );
@@ -1273,28 +1524,38 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
   /* Run through the tasks backwards and set their weights. */
   for (int k = nr_tasks - 1; k >= 0; k--) {
     struct task *t = &tasks[tid[k]];
+    float cost = 0.f;
     t->weight = 0.f;
+
     for (int j = 0; j < t->nr_unlock_tasks; j++)
       if (t->unlock_tasks[j]->weight > t->weight)
         t->weight = t->unlock_tasks[j]->weight;
-    float cost = 0.f;
 
-    const float count_i = (t->ci != NULL) ? t->ci->count : 0.f;
-    const float count_j = (t->cj != NULL) ? t->cj->count : 0.f;
-    const float gcount_i = (t->ci != NULL) ? t->ci->gcount : 0.f;
-    const float gcount_j = (t->cj != NULL) ? t->cj->gcount : 0.f;
+    const float count_i = (t->ci != NULL) ? t->ci->hydro.count : 0.f;
+    const float count_j = (t->cj != NULL) ? t->cj->hydro.count : 0.f;
+    const float gcount_i = (t->ci != NULL) ? t->ci->grav.count : 0.f;
+    const float gcount_j = (t->cj != NULL) ? t->cj->grav.count : 0.f;
+    const float scount_i = (t->ci != NULL) ? t->ci->stars.count : 0.f;
+    const float scount_j = (t->cj != NULL) ? t->cj->stars.count : 0.f;
 
     switch (t->type) {
       case task_type_sort:
         cost = wscale * intrinsics_popcount(t->flags) * count_i *
-               (sizeof(int) * 8 - intrinsics_clz(t->ci->count));
+               (sizeof(int) * 8 - intrinsics_clz(t->ci->hydro.count));
+        break;
+
+      case task_type_stars_sort:
+        cost = wscale * intrinsics_popcount(t->flags) * scount_i *
+               (sizeof(int) * 8 - intrinsics_clz(t->ci->stars.count));
         break;
 
       case task_type_self:
-        if (t->subtype == task_subtype_grav)
+        if (t->subtype == task_subtype_grav) {
           cost = 1.f * (wscale * gcount_i) * gcount_i;
-        else if (t->subtype == task_subtype_external_grav)
+        } else if (t->subtype == task_subtype_external_grav)
           cost = 1.f * wscale * gcount_i;
+        else if (t->subtype == task_subtype_stars_density)
+          cost = 1.f * wscale * scount_i * count_i;
         else
           cost = 1.f * (wscale * count_i) * count_i;
         break;
@@ -1305,6 +1566,14 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
             cost = 3.f * (wscale * gcount_i) * gcount_j;
           else
             cost = 2.f * (wscale * gcount_i) * gcount_j;
+        } else if (t->subtype == task_subtype_stars_density) {
+          if (t->ci->nodeID != nodeID)
+            cost = 3.f * wscale * count_i * scount_j * sid_scale[t->flags];
+          else if (t->cj->nodeID != nodeID)
+            cost = 3.f * wscale * scount_i * count_j * sid_scale[t->flags];
+          else
+            cost = 2.f * wscale * (scount_i * count_j + scount_j * count_i) *
+                   sid_scale[t->flags];
         } else {
           if (t->ci->nodeID != nodeID || t->cj->nodeID != nodeID)
             cost = 3.f * (wscale * count_i) * count_j * sid_scale[t->flags];
@@ -1314,27 +1583,43 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
         break;
 
       case task_type_sub_pair:
-        if (t->ci->nodeID != nodeID || t->cj->nodeID != nodeID) {
-          if (t->flags < 0)
-            cost = 3.f * (wscale * count_i) * count_j;
-          else
-            cost = 3.f * (wscale * count_i) * count_j * sid_scale[t->flags];
+#ifdef SWIFT_DEBUG_CHECKS
+        if (t->flags < 0) error("Negative flag value!");
+#endif
+        if (t->subtype == task_subtype_stars_density) {
+          if (t->ci->nodeID != nodeID) {
+            cost = 3.f * (wscale * count_i) * scount_j * sid_scale[t->flags];
+          } else if (t->cj->nodeID != nodeID) {
+            cost = 3.f * (wscale * scount_i) * count_j * sid_scale[t->flags];
+          } else {
+            cost = 2.f * wscale * (scount_i * count_j + scount_j * count_i) *
+                   sid_scale[t->flags];
+          }
+
         } else {
-          if (t->flags < 0)
-            cost = 2.f * (wscale * count_i) * count_j;
-          else
+          if (t->ci->nodeID != nodeID || t->cj->nodeID != nodeID) {
+            cost = 3.f * (wscale * count_i) * count_j * sid_scale[t->flags];
+          } else {
             cost = 2.f * (wscale * count_i) * count_j * sid_scale[t->flags];
+          }
         }
         break;
 
       case task_type_sub_self:
-        cost = 1.f * (wscale * count_i) * count_i;
+        if (t->subtype == task_subtype_stars_density) {
+          cost = 1.f * (wscale * scount_i) * count_i;
+        } else {
+          cost = 1.f * (wscale * count_i) * count_i;
+        }
         break;
       case task_type_ghost:
-        if (t->ci == t->ci->super_hydro) cost = wscale * count_i;
+        if (t->ci == t->ci->hydro.super) cost = wscale * count_i;
         break;
       case task_type_extra_ghost:
-        if (t->ci == t->ci->super_hydro) cost = wscale * count_i;
+        if (t->ci == t->ci->hydro.super) cost = wscale * count_i;
+        break;
+      case task_type_stars_ghost:
+        if (t->ci == t->ci->hydro.super) cost = wscale * scount_i;
         break;
       case task_type_drift_part:
         cost = wscale * count_i;
@@ -1354,8 +1639,11 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
       case task_type_grav_mm:
         cost = wscale * (gcount_i + gcount_j);
         break;
-      case task_type_end_force:
-        cost = wscale * count_i + wscale * gcount_i;
+      case task_type_end_hydro_force:
+        cost = wscale * count_i;
+        break;
+      case task_type_end_grav_force:
+        cost = wscale * gcount_i;
         break;
       case task_type_kick1:
         cost = wscale * count_i + wscale * gcount_i;
@@ -1382,10 +1670,6 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
         cost = 0;
         break;
     }
-
-#if defined(WITH_MPI) && defined(HAVE_METIS)
-    t->cost = cost;
-#endif
     t->weight += cost;
   }
 
@@ -1458,14 +1742,14 @@ void scheduler_enqueue_mapper(void *map_data, int num_elements,
  */
 void scheduler_start(struct scheduler *s) {
 
-/* Reset all task debugging timers */
-#ifdef SWIFT_DEBUG_TASKS
+  /* Reset all task timers. */
   for (int i = 0; i < s->nr_tasks; ++i) {
     s->tasks[i].tic = 0;
     s->tasks[i].toc = 0;
+#ifdef SWIFT_DEBUG_TASKS
     s->tasks[i].rid = -1;
-  }
 #endif
+  }
 
   /* Re-wait the tasks. */
   if (s->active_count > 1000) {
@@ -1531,20 +1815,23 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
       case task_type_sub_self:
         if (t->subtype == task_subtype_grav ||
             t->subtype == task_subtype_external_grav)
-          qid = t->ci->super_gravity->owner;
+          qid = t->ci->grav.super->owner;
         else
-          qid = t->ci->super_hydro->owner;
+          qid = t->ci->hydro.super->owner;
         break;
       case task_type_sort:
       case task_type_ghost:
       case task_type_drift_part:
-        qid = t->ci->super_hydro->owner;
+        qid = t->ci->hydro.super->owner;
         break;
       case task_type_drift_gpart:
-        qid = t->ci->super_gravity->owner;
+        qid = t->ci->grav.super->owner;
         break;
       case task_type_kick1:
       case task_type_kick2:
+      case task_type_stars_ghost:
+      case task_type_logger:
+      case task_type_stars_sort:
       case task_type_timestep:
         qid = t->ci->super->owner;
         break;
@@ -1559,34 +1846,31 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
 #ifdef WITH_MPI
         if (t->subtype == task_subtype_tend) {
           t->buff = (struct pcell_step *)malloc(sizeof(struct pcell_step) *
-                                                t->ci->pcell_size);
-          err = MPI_Irecv(
-              t->buff, t->ci->pcell_size * sizeof(struct pcell_step), MPI_BYTE,
-              t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype], &t->req);
+                                                t->ci->mpi.pcell_size);
+          err = MPI_Irecv(t->buff,
+                          t->ci->mpi.pcell_size * sizeof(struct pcell_step),
+                          MPI_BYTE, t->ci->nodeID, t->flags,
+                          subtaskMPI_comms[t->subtype], &t->req);
         } else if (t->subtype == task_subtype_xv ||
                    t->subtype == task_subtype_rho ||
                    t->subtype == task_subtype_gradient) {
-          err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
+          err = MPI_Irecv(t->ci->hydro.parts, t->ci->hydro.count, part_mpi_type,
                           t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
                           &t->req);
-          // message( "receiving %i parts with tag=%i from %i to %i." ,
-          //     t->ci->count , t->flags , t->ci->nodeID , s->nodeID );
-          // fflush(stdout);
         } else if (t->subtype == task_subtype_gpart) {
-          err = MPI_Irecv(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
+          err = MPI_Irecv(t->ci->grav.parts, t->ci->grav.count, gpart_mpi_type,
                           t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
                           &t->req);
         } else if (t->subtype == task_subtype_spart) {
-          err = MPI_Irecv(t->ci->sparts, t->ci->scount, spart_mpi_type,
-                          t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
-                          &t->req);
+          err = MPI_Irecv(t->ci->stars.parts, t->ci->stars.count,
+                          spart_mpi_type, t->ci->nodeID, t->flags,
+                          subtaskMPI_comms[t->subtype], &t->req);
         } else if (t->subtype == task_subtype_multipole) {
           t->buff = (struct gravity_tensors *)malloc(
-              sizeof(struct gravity_tensors) * t->ci->pcell_size);
-          err = MPI_Irecv(t->buff,
-                          sizeof(struct gravity_tensors) * t->ci->pcell_size,
-                          MPI_BYTE, t->ci->nodeID, t->flags,
-                          subtaskMPI_comms[t->subtype], &t->req);
+              sizeof(struct gravity_tensors) * t->ci->mpi.pcell_size);
+          err = MPI_Irecv(t->buff, t->ci->mpi.pcell_size, multipole_mpi_type,
+                          t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
+                          &t->req);
         } else {
           error("Unknown communication sub-type");
         }
@@ -1602,59 +1886,56 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
 #ifdef WITH_MPI
         if (t->subtype == task_subtype_tend) {
           t->buff = (struct pcell_step *)malloc(sizeof(struct pcell_step) *
-                                                t->ci->pcell_size);
+                                                t->ci->mpi.pcell_size);
           cell_pack_end_step(t->ci, (struct pcell_step *)t->buff);
-          if ((t->ci->pcell_size * sizeof(struct pcell_step)) >
+          if ((t->ci->mpi.pcell_size * sizeof(struct pcell_step)) >
               s->mpi_message_limit)
             err = MPI_Isend(t->buff,
-                            t->ci->pcell_size * sizeof(struct pcell_step),
+                            t->ci->mpi.pcell_size * sizeof(struct pcell_step),
                             MPI_BYTE, t->cj->nodeID, t->flags,
                             subtaskMPI_comms[t->subtype], &t->req);
           else
             err = MPI_Issend(t->buff,
-                             t->ci->pcell_size * sizeof(struct pcell_step),
+                             t->ci->mpi.pcell_size * sizeof(struct pcell_step),
                              MPI_BYTE, t->cj->nodeID, t->flags,
                              subtaskMPI_comms[t->subtype], &t->req);
         } else if (t->subtype == task_subtype_xv ||
                    t->subtype == task_subtype_rho ||
                    t->subtype == task_subtype_gradient) {
-          if ((t->ci->count * sizeof(struct part)) > s->mpi_message_limit)
-            err = MPI_Isend(t->ci->parts, t->ci->count, part_mpi_type,
-                            t->cj->nodeID, t->flags,
+          if ((t->ci->hydro.count * sizeof(struct part)) > s->mpi_message_limit)
+            err = MPI_Isend(t->ci->hydro.parts, t->ci->hydro.count,
+                            part_mpi_type, t->cj->nodeID, t->flags,
                             subtaskMPI_comms[t->subtype], &t->req);
           else
-            err = MPI_Issend(t->ci->parts, t->ci->count, part_mpi_type,
-                             t->cj->nodeID, t->flags,
+            err = MPI_Issend(t->ci->hydro.parts, t->ci->hydro.count,
+                             part_mpi_type, t->cj->nodeID, t->flags,
                              subtaskMPI_comms[t->subtype], &t->req);
-          // message( "sending %i parts with tag=%i from %i to %i." ,
-          //     t->ci->count , t->flags , s->nodeID , t->cj->nodeID );
-          // fflush(stdout);
         } else if (t->subtype == task_subtype_gpart) {
-          if ((t->ci->gcount * sizeof(struct gpart)) > s->mpi_message_limit)
-            err = MPI_Isend(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
-                            t->cj->nodeID, t->flags,
+          if ((t->ci->grav.count * sizeof(struct gpart)) > s->mpi_message_limit)
+            err = MPI_Isend(t->ci->grav.parts, t->ci->grav.count,
+                            gpart_mpi_type, t->cj->nodeID, t->flags,
                             subtaskMPI_comms[t->subtype], &t->req);
           else
-            err = MPI_Issend(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
-                             t->cj->nodeID, t->flags,
+            err = MPI_Issend(t->ci->grav.parts, t->ci->grav.count,
+                             gpart_mpi_type, t->cj->nodeID, t->flags,
                              subtaskMPI_comms[t->subtype], &t->req);
         } else if (t->subtype == task_subtype_spart) {
-          if ((t->ci->scount * sizeof(struct spart)) > s->mpi_message_limit)
-            err = MPI_Isend(t->ci->sparts, t->ci->scount, spart_mpi_type,
-                            t->cj->nodeID, t->flags,
+          if ((t->ci->stars.count * sizeof(struct spart)) >
+              s->mpi_message_limit)
+            err = MPI_Isend(t->ci->stars.parts, t->ci->stars.count,
+                            spart_mpi_type, t->cj->nodeID, t->flags,
                             subtaskMPI_comms[t->subtype], &t->req);
           else
-            err = MPI_Issend(t->ci->sparts, t->ci->scount, spart_mpi_type,
-                             t->cj->nodeID, t->flags,
+            err = MPI_Issend(t->ci->stars.parts, t->ci->stars.count,
+                             spart_mpi_type, t->cj->nodeID, t->flags,
                              subtaskMPI_comms[t->subtype], &t->req);
         } else if (t->subtype == task_subtype_multipole) {
           t->buff = (struct gravity_tensors *)malloc(
-              sizeof(struct gravity_tensors) * t->ci->pcell_size);
+              sizeof(struct gravity_tensors) * t->ci->mpi.pcell_size);
           cell_pack_multipoles(t->ci, (struct gravity_tensors *)t->buff);
-          err = MPI_Isend(t->buff,
-                          t->ci->pcell_size * sizeof(struct gravity_tensors),
-                          MPI_BYTE, t->cj->nodeID, t->flags,
-                          subtaskMPI_comms[t->subtype], &t->req);
+          err = MPI_Isend(t->buff, t->ci->mpi.pcell_size, multipole_mpi_type,
+                          t->cj->nodeID, t->flags, subtaskMPI_comms[t->subtype],
+                          &t->req);
         } else {
           error("Unknown communication sub-type");
         }
@@ -1714,9 +1995,7 @@ struct task *scheduler_done(struct scheduler *s, struct task *t) {
 
   /* Task definitely done, signal any sleeping runners. */
   if (!t->implicit) {
-#ifdef SWIFT_DEBUG_TASKS
     t->toc = getticks();
-#endif
     pthread_mutex_lock(&s->sleep_mutex);
     atomic_dec(&s->waiting);
     pthread_cond_broadcast(&s->sleep_cond);
@@ -1757,9 +2036,7 @@ struct task *scheduler_unlock(struct scheduler *s, struct task *t) {
 
   /* Task definitely done. */
   if (!t->implicit) {
-#ifdef SWIFT_DEBUG_TASKS
     t->toc = getticks();
-#endif
     pthread_mutex_lock(&s->sleep_mutex);
     atomic_dec(&s->waiting);
     pthread_cond_broadcast(&s->sleep_cond);
@@ -1843,13 +2120,13 @@ struct task *scheduler_gettask(struct scheduler *s, int qid,
     }
   }
 
-#ifdef SWIFT_DEBUG_TASKS
   /* Start the timer on this task, if we got one. */
   if (res != NULL) {
     res->tic = getticks();
+#ifdef SWIFT_DEBUG_TASKS
     res->rid = qid;
-  }
 #endif
+  }
 
   /* No milk today. */
   return res;
@@ -1967,3 +2244,61 @@ void scheduler_free_tasks(struct scheduler *s) {
   }
   s->size = 0;
 }
+
+/**
+ * @brief write down each task level
+ */
+void scheduler_write_task_level(const struct scheduler *s) {
+  /* init */
+  const int max_depth = 30;
+  const struct task *tasks = s->tasks;
+  int nr_tasks = s->nr_tasks;
+
+  /* Init counter */
+  int size = task_type_count * task_subtype_count * max_depth;
+  int *count = (int *)malloc(size * sizeof(int));
+  if (count == NULL) error("Failed to allocate memory");
+
+  for (int i = 0; i < size; i++) count[i] = 0;
+
+  /* Count tasks */
+  for (int i = 0; i < nr_tasks; i++) {
+    const struct task *t = &tasks[i];
+    if (t->ci) {
+
+      if ((int)t->ci->depth >= max_depth)
+        error("Cell is too deep, you need to increase max_depth");
+
+      int ind = t->type * task_subtype_count * max_depth;
+      ind += t->subtype * max_depth;
+      ind += (int)t->ci->depth;
+
+      count[ind] += 1;
+    }
+  }
+
+  /* Open file */
+  char filename[200] = "task_level.txt";
+  FILE *f = fopen(filename, "w");
+  if (f == NULL) error("Error opening task level file.");
+
+  /* Print header */
+  fprintf(f, "# task_type, task_subtype, depth, count\n");
+
+  /* Print tasks level */
+  for (int i = 0; i < size; i++) {
+    if (count[i] == 0) continue;
+
+    int type = i / (task_subtype_count * max_depth);
+    int subtype = i - task_subtype_count * max_depth * type;
+    subtype /= max_depth;
+    int depth = i - task_subtype_count * max_depth * type;
+    depth -= subtype * max_depth;
+    fprintf(f, "%s %s %i %i\n", taskID_names[type], subtaskID_names[subtype],
+            depth, count[i]);
+  }
+
+  /* clean up */
+  fclose(f);
+  free(count);
+}
diff --git a/src/scheduler.h b/src/scheduler.h
index 1a75544de12b8402e553e3ae2b84e2d8a65c56e8..3fb1c2e43bddd73a08edd40c9c2969ea20ee6d39 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -120,6 +120,7 @@ struct scheduler {
  */
 __attribute__((always_inline)) INLINE static void scheduler_activate(
     struct scheduler *s, struct task *t) {
+
   if (atomic_cas(&t->skip, 1, 0)) {
     t->wait = 0;
     int ind = atomic_inc(&s->active_count);
@@ -143,7 +144,9 @@ scheduler_activate_send(struct scheduler *s, struct link *link, int nodeID) {
   struct link *l = NULL;
   for (l = link; l != NULL && l->t->cj->nodeID != nodeID; l = l->next)
     ;
-  if (l == NULL) error("Missing link to send task.");
+  if (l == NULL) {
+    error("Missing link to send task.");
+  }
   scheduler_activate(s, l->t);
   return l;
 }
@@ -173,5 +176,6 @@ void scheduler_print_tasks(const struct scheduler *s, const char *fileName);
 void scheduler_clean(struct scheduler *s);
 void scheduler_free_tasks(struct scheduler *s);
 void scheduler_write_dependencies(struct scheduler *s, int verbose);
+void scheduler_write_task_level(const struct scheduler *s);
 
 #endif /* SWIFT_SCHEDULER_H */
diff --git a/src/serial_io.c b/src/serial_io.c
index 5db1ef9e0a14d75ac875722a4c0e48edd9880a92..d4db3400b0e405ccb08e1b14bead2b5c65817b22 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -42,6 +42,7 @@
 #include "cooling_io.h"
 #include "dimension.h"
 #include "engine.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "gravity_io.h"
 #include "gravity_properties.h"
@@ -52,8 +53,11 @@
 #include "memuse.h"
 #include "part.h"
 #include "part_type.h"
+#include "star_formation_io.h"
 #include "stars_io.h"
+#include "tracers_io.h"
 #include "units.h"
+#include "velociraptor_io.h"
 #include "xmf.h"
 
 /**
@@ -150,11 +154,41 @@ void readArray(hid_t grp, const struct io_props props, size_t N,
     /* message("Converting ! factor=%e", factor); */
 
     if (io_is_double_precision(props.type)) {
-      double* temp_d = temp;
+      double* temp_d = (double*)temp;
       for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
     } else {
-      float* temp_f = temp;
-      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
+      float* temp_f = (float*)temp;
+
+#ifdef SWIFT_DEBUG_CHECKS
+      float maximum = 0.f;
+      float minimum = FLT_MAX;
+#endif
+
+      /* Loop that converts the Units */
+      for (size_t i = 0; i < num_elements; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Find the absolute minimum and maximum values */
+        const float abstemp_f = fabsf(temp_f[i]);
+        if (abstemp_f != 0.f) {
+          maximum = max(maximum, abstemp_f);
+          minimum = min(minimum, abstemp_f);
+        }
+#endif
+
+        /* Convert the float units */
+        temp_f[i] *= factor;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* The two possible errors: larger than float or smaller
+       * than float precision. */
+      if (factor * maximum > FLT_MAX) {
+        error("Unit conversion results in numbers larger than floats");
+      } else if (factor * minimum < FLT_MIN) {
+        error("Numbers smaller than float precision");
+      }
+#endif
     }
   }
 
@@ -191,7 +225,7 @@ void readArray(hid_t grp, const struct io_props props, size_t N,
   }
 
   /* Copy temporary buffer to particle data */
-  char* temp_c = temp;
+  char* temp_c = (char*)temp;
   for (size_t i = 0; i < N; ++i)
     memcpy(props.field + i * props.partSize, &temp_c[i * copySize], copySize);
 
@@ -271,8 +305,9 @@ void prepareArray(const struct engine* e, hid_t grp, char* fileName,
   if (h_data < 0) error("Error while creating dataspace '%s'.", props.name);
 
   /* Write XMF description for this data set */
-  xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
-                 props.dimension, props.type);
+  if (xmfFile != NULL)
+    xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
+                   props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
   char buffer[FIELD_BUFFER_SIZE];
@@ -399,7 +434,6 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName,
  * @param Ngas (output) The number of #part read from the file on that node.
  * @param Ngparts (output) The number of #gpart read from the file on that node.
  * @param Nstars (output) The number of #spart read from the file on that node.
- * @param periodic (output) 1 if the volume is periodic, 0 if not.
  * @param flag_entropy (output) 1 if the ICs contained Entropy in the
  * InternalEnergy field
  * @param with_hydro Are we reading gas particles ?
@@ -428,11 +462,11 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName,
 void read_ic_serial(char* fileName, const struct unit_system* internal_units,
                     double dim[3], struct part** parts, struct gpart** gparts,
                     struct spart** sparts, size_t* Ngas, size_t* Ngparts,
-                    size_t* Nstars, int* periodic, int* flag_entropy,
-                    int with_hydro, int with_gravity, int with_stars,
-                    int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                    int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info,
-                    int n_threads, int dry_run) {
+                    size_t* Nstars, int* flag_entropy, int with_hydro,
+                    int with_gravity, int with_stars, int cleanup_h,
+                    int cleanup_sqrt_a, double h, double a, int mpi_rank,
+                    int mpi_size, MPI_Comm comm, MPI_Info info, int n_threads,
+                    int dry_run) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -445,7 +479,8 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
   long long offset[swift_type_count] = {0};
   int dimension = 3; /* Assume 3D if nothing is specified */
   size_t Ndm = 0;
-  struct unit_system* ic_units = malloc(sizeof(struct unit_system));
+  struct unit_system* ic_units =
+      (struct unit_system*)malloc(sizeof(struct unit_system));
 
   /* First read some information about the content */
   if (mpi_rank == 0) {
@@ -456,17 +491,6 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
     if (h_file < 0)
       error("Error while opening file '%s' for initial read.", fileName);
 
-    /* Open header to read simulation properties */
-    /* message("Reading runtime parameters..."); */
-    h_grp = H5Gopen(h_file, "/RuntimePars", H5P_DEFAULT);
-    if (h_grp < 0) error("Error while opening runtime parameters\n");
-
-    /* Read the relevant information */
-    io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
-
-    /* Close runtime parameters */
-    H5Gclose(h_grp);
-
     /* Open header to read simulation properties */
     /* message("Reading file header..."); */
     h_grp = H5Gopen(h_file, "/Header", H5P_DEFAULT);
@@ -481,6 +505,23 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
       error("ICs dimensionality (%dD) does not match code dimensionality (%dD)",
             dimension, (int)hydro_dimension);
 
+    /* Check whether the number of files is specified (if the info exists) */
+    const hid_t hid_files = H5Aexists(h_grp, "NumFilesPerSnapshot");
+    int num_files = 1;
+    if (hid_files < 0)
+      error(
+          "Error while testing the existence of 'NumFilesPerSnapshot' "
+          "attribute");
+    if (hid_files > 0)
+      io_read_attribute(h_grp, "NumFilesPerSnapshot", INT, &num_files);
+    if (num_files != 1)
+      error(
+          "ICs are split over multiple files (%d). SWIFT cannot handle this "
+          "case. The script /tools/combine_ics.py is available in the "
+          "repository "
+          "to combine files into a valid input file.",
+          num_files);
+
     /* Read the relevant information and print status */
     int flag_entropy_temp[6];
     io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp);
@@ -561,7 +602,6 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
 
   /* Now need to broadcast that information to all ranks. */
   MPI_Bcast(flag_entropy, 1, MPI_INT, 0, comm);
-  MPI_Bcast(periodic, 1, MPI_INT, 0, comm);
   MPI_Bcast(&N_total, swift_type_count, MPI_LONG_LONG_INT, 0, comm);
   MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm);
   MPI_Bcast(ic_units, sizeof(struct unit_system), MPI_BYTE, 0, comm);
@@ -575,19 +615,19 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
   /* Allocate memory to store SPH particles */
   if (with_hydro) {
     *Ngas = N[0];
-    if (posix_memalign((void*)parts, part_align, *Ngas * sizeof(struct part)) !=
-        0)
+    if (posix_memalign((void**)parts, part_align,
+                       *Ngas * sizeof(struct part)) != 0)
       error("Error while allocating memory for SPH particles");
     bzero(*parts, *Ngas * sizeof(struct part));
     memuse_report("parts", (*Ngas) * sizeof(struct part));
   }
 
-  /* Allocate memory to store star particles */
+  /* Allocate memory to store stars particles */
   if (with_stars) {
-    *Nstars = N[swift_type_star];
-    if (posix_memalign((void*)sparts, spart_align,
+    *Nstars = N[swift_type_stars];
+    if (posix_memalign((void**)sparts, spart_align,
                        *Nstars * sizeof(struct spart)) != 0)
-      error("Error while allocating memory for star particles");
+      error("Error while allocating memory for stars particles");
     bzero(*sparts, *Nstars * sizeof(struct spart));
     memuse_report("sparts", (*Nstars) * sizeof(struct spart));
   }
@@ -597,8 +637,8 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
     Ndm = N[1];
     *Ngparts = (with_hydro ? N[swift_type_gas] : 0) +
                N[swift_type_dark_matter] +
-               (with_stars ? N[swift_type_star] : 0);
-    if (posix_memalign((void*)gparts, gpart_align,
+               (with_stars ? N[swift_type_stars] : 0);
+    if (posix_memalign((void**)gparts, gpart_align,
                        *Ngparts * sizeof(struct gpart)) != 0)
       error("Error while allocating memory for gravity particles");
     bzero(*gparts, *Ngparts * sizeof(struct gpart));
@@ -659,10 +699,10 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
             }
             break;
 
-          case swift_type_star:
+          case swift_type_stars:
             if (with_stars) {
               Nparticles = *Nstars;
-              star_read_particles(*sparts, list, &num_fields);
+              stars_read_particles(*sparts, list, &num_fields);
             }
             break;
 
@@ -704,9 +744,9 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
     /* Duplicate the hydro particles into gparts */
     if (with_hydro) io_duplicate_hydro_gparts(&tp, *parts, *gparts, *Ngas, Ndm);
 
-    /* Duplicate the star particles into gparts */
+    /* Duplicate the stars particles into gparts */
     if (with_stars)
-      io_duplicate_star_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
+      io_duplicate_stars_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
 
     threadpool_clean(&tp);
   }
@@ -743,35 +783,54 @@ void write_output_serial(struct engine* e, const char* baseName,
                          int mpi_size, MPI_Comm comm, MPI_Info info) {
 
   hid_t h_file = 0, h_grp = 0;
-  const size_t Ngas = e->s->nr_parts;
-  const size_t Nstars = e->s->nr_sparts;
-  const size_t Ntot = e->s->nr_gparts;
-  int periodic = e->s->periodic;
   int numFiles = 1;
   const struct part* parts = e->s->parts;
   const struct xpart* xparts = e->s->xparts;
   const struct gpart* gparts = e->s->gparts;
-  struct gpart* dmparts = NULL;
   const struct spart* sparts = e->s->sparts;
-  const struct cooling_function_data* cooling = e->cooling_func;
   struct swift_params* params = e->parameter_file;
+  const int with_cosmology = e->policy & engine_policy_cosmology;
+  const int with_cooling = e->policy & engine_policy_cooling;
+  const int with_temperature = e->policy & engine_policy_temperature;
+#ifdef HAVE_VELOCIRAPTOR
+  const int with_stf = (e->policy & engine_policy_structure_finding) &&
+                       (e->s->gpart_group_data != NULL);
+#else
+  const int with_stf = 0;
+#endif
+
   FILE* xmfFile = 0;
 
-  /* Number of unassociated gparts */
-  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
+  /* Number of particles currently in the arrays */
+  const size_t Ntot = e->s->nr_gparts;
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  // const size_t Nbaryons = Ngas + Nstars;
+  // const size_t Ndm = Ntot > 0 ? Ntot - Nbaryons : 0;
+
+  /* Number of particles that we will write */
+  const size_t Ntot_written =
+      e->s->nr_gparts - e->s->nr_inhibited_gparts - e->s->nr_extra_gparts;
+  const size_t Ngas_written =
+      e->s->nr_parts - e->s->nr_inhibited_parts - e->s->nr_extra_parts;
+  const size_t Nstars_written =
+      e->s->nr_sparts - e->s->nr_inhibited_sparts - e->s->nr_extra_sparts;
+  const size_t Nbaryons_written = Ngas_written + Nstars_written;
+  const size_t Ndm_written =
+      Ntot_written > 0 ? Ntot_written - Nbaryons_written : 0;
 
   /* File name */
   char fileName[FILENAME_BUFFER_SIZE];
-  if (e->snapshot_label_delta == 1)
-    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-             e->snapshot_output_count + e->snapshot_label_first);
-  else
+  if (e->snapshot_int_time_label_on)
     snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%06i.hdf5", baseName,
-             e->snapshot_output_count * e->snapshot_label_delta +
-                 e->snapshot_label_first);
+             (int)round(e->time));
+  else
+    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+             e->snapshot_output_count);
 
   /* Compute offset in the file and total number of particles */
-  size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
+  size_t N[swift_type_count] = {
+      Ngas_written, Ndm_written, 0, 0, Nstars_written, 0};
   long long N_total[swift_type_count] = {0};
   long long offset[swift_type_count] = {0};
   MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
@@ -801,28 +860,25 @@ void write_output_serial(struct engine* e, const char* baseName,
     h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
     if (h_file < 0) error("Error while opening file '%s'.", fileName);
 
-    /* Open header to write simulation properties */
-    /* message("Writing runtime parameters..."); */
-    h_grp = H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT,
-                      H5P_DEFAULT);
-    if (h_grp < 0) error("Error while creating runtime parameters group\n");
-
-    /* Write the relevant information */
-    io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);
-
-    /* Close runtime parameters */
-    H5Gclose(h_grp);
-
     /* Open header to write simulation properties */
     /* message("Writing file header..."); */
     h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
     if (h_grp < 0) error("Error while creating file header\n");
 
+    /* Convert basic output information to snapshot units */
+    const double factor_time =
+        units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_TIME);
+    const double factor_length = units_conversion_factor(
+        internal_units, snapshot_units, UNIT_CONV_LENGTH);
+    const double dblTime = e->time * factor_time;
+    const double dim[3] = {e->s->dim[0] * factor_length,
+                           e->s->dim[1] * factor_length,
+                           e->s->dim[2] * factor_length};
+
     /* Print the relevant information and print status */
-    io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
-    double dblTime = e->time;
+    io_write_attribute(h_grp, "BoxSize", DOUBLE, dim, 3);
     io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
-    int dimension = (int)hydro_dimension;
+    const int dimension = (int)hydro_dimension;
     io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
     io_write_attribute(h_grp, "Redshift", DOUBLE, &e->cosmology->z, 1);
     io_write_attribute(h_grp, "Scale-factor", DOUBLE, &e->cosmology->a, 1);
@@ -875,8 +931,10 @@ void write_output_serial(struct engine* e, const char* baseName,
     h_grp = H5Gcreate(h_file, "/SubgridScheme", H5P_DEFAULT, H5P_DEFAULT,
                       H5P_DEFAULT);
     if (h_grp < 0) error("Error while creating subgrid group");
-    cooling_write_flavour(h_grp);
+    entropy_floor_write_flavour(h_grp);
+    cooling_write_flavour(h_grp, e->cooling_func);
     chemistry_write_flavour(h_grp);
+    tracers_write_flavour(h_grp);
     H5Gclose(h_grp);
 
     /* Print the gravity parameters */
@@ -888,6 +946,15 @@ void write_output_serial(struct engine* e, const char* baseName,
       H5Gclose(h_grp);
     }
 
+    /* Print the stellar parameters */
+    if (e->policy & engine_policy_stars) {
+      h_grp = H5Gcreate(h_file, "/StarsScheme", H5P_DEFAULT, H5P_DEFAULT,
+                        H5P_DEFAULT);
+      if (h_grp < 0) error("Error while creating stars group");
+      stars_props_print_snapshot(h_grp, e->stars_properties);
+      H5Gclose(h_grp);
+    }
+
     /* Print the cosmological model */
     h_grp =
         H5Gcreate(h_file, "/Cosmology", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -974,6 +1041,32 @@ void write_output_serial(struct engine* e, const char* baseName,
     H5Fclose(h_file);
   }
 
+  /* Now write the top-level cell structure */
+  hid_t h_file_cells = 0, h_grp_cells = 0;
+  if (mpi_rank == 0) {
+
+    /* Open the snapshot on rank 0 */
+    h_file_cells = H5Fopen(fileName, H5F_ACC_RDWR, H5P_DEFAULT);
+    if (h_file_cells < 0)
+      error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);
+
+    /* Create the group we want in the file */
+    h_grp_cells = H5Gcreate(h_file_cells, "/Cells", H5P_DEFAULT, H5P_DEFAULT,
+                            H5P_DEFAULT);
+    if (h_grp_cells < 0) error("Error while creating cells group");
+  }
+
+  /* Write the location of the particles in the arrays */
+  io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->cells_top,
+                        e->s->nr_cells, e->s->width, mpi_rank, N_total, offset,
+                        internal_units, snapshot_units);
+
+  /* Close everything */
+  if (mpi_rank == 0) {
+    H5Gclose(h_grp_cells);
+    H5Fclose(h_file_cells);
+  }
+
   /* Now loop over ranks and write the data */
   for (int rank = 0; rank < mpi_size; ++rank) {
 
@@ -1008,36 +1101,158 @@ void write_output_serial(struct engine* e, const char* baseName,
         struct io_props list[100];
         size_t Nparticles = 0;
 
+        struct part* parts_written = NULL;
+        struct xpart* xparts_written = NULL;
+        struct gpart* gparts_written = NULL;
+        struct velociraptor_gpart_data* gpart_group_data_written = NULL;
+        struct spart* sparts_written = NULL;
+
         /* Write particle fields from the particle structure */
         switch (ptype) {
 
-          case swift_type_gas:
-            Nparticles = Ngas;
-            hydro_write_particles(parts, xparts, list, &num_fields);
-            num_fields += chemistry_write_particles(parts, list + num_fields);
-            num_fields +=
-                cooling_write_particles(xparts, list + num_fields, cooling);
-            break;
-
-          case swift_type_dark_matter:
-            /* Allocate temporary array */
-            if (posix_memalign((void*)&dmparts, gpart_align,
-                               Ndm * sizeof(struct gpart)) != 0)
-              error("Error while allocating temporary memory for DM particles");
-            bzero(dmparts, Ndm * sizeof(struct gpart));
-
-            /* Collect the DM particles from gpart */
-            io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
-
-            /* Write DM particles */
-            Nparticles = Ndm;
-            darkmatter_write_particles(dmparts, list, &num_fields);
-            break;
-
-          case swift_type_star:
-            Nparticles = Nstars;
-            star_write_particles(sparts, list, &num_fields);
-            break;
+          case swift_type_gas: {
+            if (Ngas == Ngas_written) {
+
+              /* No inhibited particles: easy case */
+              Nparticles = Ngas;
+              hydro_write_particles(parts, xparts, list, &num_fields);
+              num_fields += chemistry_write_particles(parts, list + num_fields);
+              if (with_cooling || with_temperature) {
+                num_fields += cooling_write_particles(
+                    parts, xparts, list + num_fields, e->cooling_func);
+              }
+              if (with_stf) {
+                num_fields +=
+                    velociraptor_write_parts(parts, xparts, list + num_fields);
+              }
+              num_fields += tracers_write_particles(
+                  parts, xparts, list + num_fields, with_cosmology);
+              num_fields += star_formation_write_particles(parts, xparts,
+                                                           list + num_fields);
+
+            } else {
+
+              /* Ok, we need to fish out the particles we want */
+              Nparticles = Ngas_written;
+
+              /* Allocate temporary arrays */
+              if (posix_memalign((void**)&parts_written, part_align,
+                                 Ngas_written * sizeof(struct part)) != 0)
+                error("Error while allocating temporary memory for parts");
+              if (posix_memalign((void**)&xparts_written, xpart_align,
+                                 Ngas_written * sizeof(struct xpart)) != 0)
+                error("Error while allocating temporary memory for xparts");
+
+              /* Collect the particles we want to write */
+              io_collect_parts_to_write(parts, xparts, parts_written,
+                                        xparts_written, Ngas, Ngas_written);
+
+              /* Select the fields to write */
+              hydro_write_particles(parts_written, xparts_written, list,
+                                    &num_fields);
+              num_fields +=
+                  chemistry_write_particles(parts_written, list + num_fields);
+              if (with_cooling || with_temperature) {
+                num_fields +=
+                    cooling_write_particles(parts_written, xparts_written,
+                                            list + num_fields, e->cooling_func);
+              }
+              if (with_stf) {
+                num_fields += velociraptor_write_parts(
+                    parts_written, xparts_written, list + num_fields);
+              }
+              num_fields +=
+                  tracers_write_particles(parts_written, xparts_written,
+                                          list + num_fields, with_cosmology);
+              num_fields += star_formation_write_particles(
+                  parts_written, xparts_written, list + num_fields);
+            }
+          } break;
+
+          case swift_type_dark_matter: {
+            if (Ntot == Ndm_written) {
+
+              /* This is a DM-only run without inhibited particles */
+              Nparticles = Ntot;
+              darkmatter_write_particles(gparts, list, &num_fields);
+              if (with_stf) {
+                num_fields += velociraptor_write_gparts(e->s->gpart_group_data,
+                                                        list + num_fields);
+              }
+            } else {
+
+              /* Ok, we need to fish out the particles we want */
+              Nparticles = Ndm_written;
+
+              /* Allocate temporary array */
+              if (posix_memalign((void**)&gparts_written, gpart_align,
+                                 Ndm_written * sizeof(struct gpart)) != 0)
+                error("Error while allocating temporary memory for gparts");
+
+              if (with_stf) {
+                if (posix_memalign(
+                        (void**)&gpart_group_data_written, gpart_align,
+                        Ndm_written * sizeof(struct velociraptor_gpart_data)) !=
+                    0)
+                  error(
+                      "Error while allocating temporary memory for gparts STF "
+                      "data");
+              }
+
+              /* Collect the non-inhibited DM particles from gpart */
+              io_collect_gparts_to_write(
+                  gparts, e->s->gpart_group_data, gparts_written,
+                  gpart_group_data_written, Ntot, Ndm_written, with_stf);
+
+              /* Select the fields to write */
+              darkmatter_write_particles(gparts_written, list, &num_fields);
+              if (with_stf) {
+                num_fields += velociraptor_write_gparts(
+                    gpart_group_data_written, list + num_fields);
+              }
+            }
+          } break;
+
+          case swift_type_stars: {
+            if (Nstars == Nstars_written) {
+
+              /* No inhibited particles: easy case */
+              Nparticles = Nstars;
+              stars_write_particles(sparts, list, &num_fields);
+              num_fields +=
+                  chemistry_write_sparticles(sparts, list + num_fields);
+              num_fields += tracers_write_sparticles(sparts, list + num_fields,
+                                                     with_cosmology);
+              if (with_stf) {
+                num_fields +=
+                    velociraptor_write_sparts(sparts, list + num_fields);
+              }
+            } else {
+
+              /* Ok, we need to fish out the particles we want */
+              Nparticles = Nstars_written;
+
+              /* Allocate temporary arrays */
+              if (posix_memalign((void**)&sparts_written, spart_align,
+                                 Nstars_written * sizeof(struct spart)) != 0)
+                error("Error while allocating temporary memory for sparts");
+
+              /* Collect the particles we want to write */
+              io_collect_sparts_to_write(sparts, sparts_written, Nstars,
+                                         Nstars_written);
+
+              /* Select the fields to write */
+              stars_write_particles(sparts_written, list, &num_fields);
+              num_fields +=
+                  chemistry_write_sparticles(sparts, list + num_fields);
+              num_fields += tracers_write_sparticles(sparts, list + num_fields,
+                                                     with_cosmology);
+              if (with_stf) {
+                num_fields += velociraptor_write_sparts(sparts_written,
+                                                        list + num_fields);
+              }
+            }
+          } break;
 
           default:
             error("Particle Type %d not yet supported. Aborting", ptype);
@@ -1059,10 +1274,11 @@ void write_output_serial(struct engine* e, const char* baseName,
         }
 
         /* Free temporary array */
-        if (dmparts) {
-          free(dmparts);
-          dmparts = 0;
-        }
+        if (parts_written) free(parts_written);
+        if (xparts_written) free(xparts_written);
+        if (gparts_written) free(gparts_written);
+        if (gpart_group_data_written) free(gpart_group_data_written);
+        if (sparts_written) free(sparts_written);
 
         /* Close particle group */
         H5Gclose(h_grp);
diff --git a/src/serial_io.h b/src/serial_io.h
index 6644e34bb32bcbd63250f25502563155eda0a293..07df76fe869fa0612bba5cf953faadd8bc63f29e 100644
--- a/src/serial_io.h
+++ b/src/serial_io.h
@@ -31,23 +31,30 @@
 
 /* Includes. */
 #include "engine.h"
+#include "io_properties.h"
 #include "part.h"
 #include "units.h"
 
 void read_ic_serial(char* fileName, const struct unit_system* internal_units,
                     double dim[3], struct part** parts, struct gpart** gparts,
                     struct spart** sparts, size_t* Ngas, size_t* Ngparts,
-                    size_t* Nstars, int* periodic, int* flag_entropy,
-                    int with_hydro, int with_gravity, int with_stars,
-                    int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                    int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info,
-                    int nr_threads, int dry_run);
+                    size_t* Nstars, int* flag_entropy, int with_hydro,
+                    int with_gravity, int with_stars, int cleanup_h,
+                    int cleanup_sqrt_a, double h, double a, int mpi_rank,
+                    int mpi_size, MPI_Comm comm, MPI_Info info, int nr_threads,
+                    int dry_run);
 
 void write_output_serial(struct engine* e, const char* baseName,
                          const struct unit_system* internal_units,
                          const struct unit_system* snapshot_units, int mpi_rank,
                          int mpi_size, MPI_Comm comm, MPI_Info info);
 
+void writeArray(const struct engine* e, hid_t grp, char* fileName,
+                FILE* xmfFile, char* partTypeGroupName,
+                const struct io_props props, size_t N, long long N_total,
+                int mpi_rank, long long offset,
+                const struct unit_system* internal_units,
+                const struct unit_system* snapshot_units);
 #endif
 
 #endif /* SWIFT_SERIAL_IO_H */
diff --git a/src/single_io.c b/src/single_io.c
index 87a2f88b2d5152994b163790762ced248f4581d5..250f018754e355577600b1acc6d4b36946002fac 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -41,6 +41,7 @@
 #include "cooling_io.h"
 #include "dimension.h"
 #include "engine.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "gravity_io.h"
 #include "gravity_properties.h"
@@ -51,8 +52,11 @@
 #include "memuse.h"
 #include "part.h"
 #include "part_type.h"
+#include "star_formation_io.h"
 #include "stars_io.h"
+#include "tracers_io.h"
 #include "units.h"
+#include "velociraptor_io.h"
 #include "xmf.h"
 
 /**
@@ -128,9 +132,40 @@ void readArray(hid_t h_grp, const struct io_props props, size_t N,
     if (io_is_double_precision(props.type)) {
       double* temp_d = (double*)temp;
       for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= unit_factor;
+
     } else {
       float* temp_f = (float*)temp;
-      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= unit_factor;
+
+#ifdef SWIFT_DEBUG_CHECKS
+      float maximum = 0.f;
+      float minimum = FLT_MAX;
+#endif
+
+      /* Loop that converts the Units */
+      for (size_t i = 0; i < num_elements; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+        /* Find the absolute minimum and maximum values */
+        const float abstemp_f = fabsf(temp_f[i]);
+        if (abstemp_f != 0.f) {
+          maximum = max(maximum, abstemp_f);
+          minimum = min(minimum, abstemp_f);
+        }
+#endif
+
+        /* Convert the float units */
+        temp_f[i] *= unit_factor;
+      }
+
+#ifdef SWIFT_DEBUG_CHECKS
+      /* The two possible errors: larger than float or smaller
+       * than float precision. */
+      if (unit_factor * maximum > FLT_MAX) {
+        error("Unit conversion results in numbers larger than floats");
+      } else if (unit_factor * minimum < FLT_MIN) {
+        error("Numbers smaller than float precision");
+      }
+#endif
     }
   }
 
@@ -282,8 +317,9 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName,
   if (h_err < 0) error("Error while writing data array '%s'.", props.name);
 
   /* Write XMF description for this data set */
-  xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N,
-                 props.dimension, props.type);
+  if (xmfFile != NULL)
+    xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N,
+                   props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
   char buffer[FIELD_BUFFER_SIZE];
@@ -315,7 +351,6 @@ void writeArray(const struct engine* e, hid_t grp, char* fileName,
  * @param Ngas (output) number of Gas particles read.
  * @param Ngparts (output) The number of #gpart read.
  * @param Nstars (output) The number of #spart read.
- * @param periodic (output) 1 if the volume is periodic, 0 if not.
  * @param flag_entropy (output) 1 if the ICs contained Entropy in the
  * InternalEnergy field
  * @param with_hydro Are we reading gas particles ?
@@ -340,10 +375,10 @@ void read_ic_single(const char* fileName,
                     const struct unit_system* internal_units, double dim[3],
                     struct part** parts, struct gpart** gparts,
                     struct spart** sparts, size_t* Ngas, size_t* Ngparts,
-                    size_t* Nstars, int* periodic, int* flag_entropy,
-                    int with_hydro, int with_gravity, int with_stars,
-                    int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                    int n_threads, int dry_run) {
+                    size_t* Nstars, int* flag_entropy, int with_hydro,
+                    int with_gravity, int with_stars, int cleanup_h,
+                    int cleanup_sqrt_a, double h, double a, int n_threads,
+                    int dry_run) {
 
   hid_t h_file = 0, h_grp = 0;
   /* GADGET has only cubic boxes (in cosmological mode) */
@@ -360,17 +395,6 @@ void read_ic_single(const char* fileName,
   h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
   if (h_file < 0) error("Error while opening file '%s'.", fileName);
 
-  /* Open header to read simulation properties */
-  /* message("Reading runtime parameters..."); */
-  h_grp = H5Gopen(h_file, "/RuntimePars", H5P_DEFAULT);
-  if (h_grp < 0) error("Error while opening runtime parameters\n");
-
-  /* Read the relevant information */
-  io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
-
-  /* Close runtime parameters */
-  H5Gclose(h_grp);
-
   /* Open header to read simulation properties */
   /* message("Reading file header..."); */
   h_grp = H5Gopen(h_file, "/Header", H5P_DEFAULT);
@@ -385,6 +409,21 @@ void read_ic_single(const char* fileName,
     error("ICs dimensionality (%dD) does not match code dimensionality (%dD)",
           dimension, (int)hydro_dimension);
 
+  /* Check whether the number of files is specified (if the info exists) */
+  const hid_t hid_files = H5Aexists(h_grp, "NumFilesPerSnapshot");
+  int num_files = 1;
+  if (hid_files < 0)
+    error(
+        "Error while testing the existence of 'NumFilesPerSnapshot' attribute");
+  if (hid_files > 0)
+    io_read_attribute(h_grp, "NumFilesPerSnapshot", INT, &num_files);
+  if (num_files != 1)
+    error(
+        "ICs are split over multiple files (%d). SWIFT cannot handle this "
+        "case. The script /tools/combine_ics.py is available in the repository "
+        "to combine files into a valid input file.",
+        num_files);
+
   /* Read the relevant information and print status */
   int flag_entropy_temp[6];
   io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp);
@@ -473,10 +512,10 @@ void read_ic_single(const char* fileName,
 
   /* Allocate memory to store star particles */
   if (with_stars) {
-    *Nstars = N[swift_type_star];
+    *Nstars = N[swift_type_stars];
     if (posix_memalign((void**)sparts, spart_align,
                        *Nstars * sizeof(struct spart)) != 0)
-      error("Error while allocating memory for star particles");
+      error("Error while allocating memory for stars particles");
     bzero(*sparts, *Nstars * sizeof(struct spart));
     memuse_report("sparts", (*Nstars) * sizeof(struct spart));
   }
@@ -486,7 +525,7 @@ void read_ic_single(const char* fileName,
     Ndm = N[swift_type_dark_matter];
     *Ngparts = (with_hydro ? N[swift_type_gas] : 0) +
                N[swift_type_dark_matter] +
-               (with_stars ? N[swift_type_star] : 0);
+               (with_stars ? N[swift_type_stars] : 0);
     if (posix_memalign((void**)gparts, gpart_align,
                        *Ngparts * sizeof(struct gpart)) != 0)
       error("Error while allocating memory for gravity particles");
@@ -536,10 +575,10 @@ void read_ic_single(const char* fileName,
         }
         break;
 
-      case swift_type_star:
+      case swift_type_stars:
         if (with_stars) {
           Nparticles = *Nstars;
-          star_read_particles(*sparts, list, &num_fields);
+          stars_read_particles(*sparts, list, &num_fields);
         }
         break;
 
@@ -572,7 +611,7 @@ void read_ic_single(const char* fileName,
 
     /* Duplicate the star particles into gparts */
     if (with_stars)
-      io_duplicate_star_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
+      io_duplicate_stars_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);
 
     threadpool_clean(&tp);
   }
@@ -607,34 +646,56 @@ void write_output_single(struct engine* e, const char* baseName,
                          const struct unit_system* snapshot_units) {
 
   hid_t h_file = 0, h_grp = 0;
-  const size_t Ngas = e->s->nr_parts;
-  const size_t Nstars = e->s->nr_sparts;
-  const size_t Ntot = e->s->nr_gparts;
-  int periodic = e->s->periodic;
   int numFiles = 1;
   const struct part* parts = e->s->parts;
   const struct xpart* xparts = e->s->xparts;
   const struct gpart* gparts = e->s->gparts;
-  struct gpart* dmparts = NULL;
   const struct spart* sparts = e->s->sparts;
-  const struct cooling_function_data* cooling = e->cooling_func;
   struct swift_params* params = e->parameter_file;
-
-  /* Number of unassociated gparts */
-  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;
-
-  long long N_total[swift_type_count] = {
-      (long long)Ngas, (long long)Ndm, 0, 0, (long long)Nstars, 0};
+  const int with_cosmology = e->policy & engine_policy_cosmology;
+  const int with_cooling = e->policy & engine_policy_cooling;
+  const int with_temperature = e->policy & engine_policy_temperature;
+#ifdef HAVE_VELOCIRAPTOR
+  const int with_stf = (e->policy & engine_policy_structure_finding) &&
+                       (e->s->gpart_group_data != NULL);
+#else
+  const int with_stf = 0;
+#endif
+
+  /* Number of particles currently in the arrays */
+  const size_t Ntot = e->s->nr_gparts;
+  const size_t Ngas = e->s->nr_parts;
+  const size_t Nstars = e->s->nr_sparts;
+  // const size_t Nbaryons = Ngas + Nstars;
+  // const size_t Ndm = Ntot > 0 ? Ntot - Nbaryons : 0;
+
+  /* Number of particles that we will write */
+  const size_t Ntot_written =
+      e->s->nr_gparts - e->s->nr_inhibited_gparts - e->s->nr_extra_gparts;
+  const size_t Ngas_written =
+      e->s->nr_parts - e->s->nr_inhibited_parts - e->s->nr_extra_parts;
+  const size_t Nstars_written =
+      e->s->nr_sparts - e->s->nr_inhibited_sparts - e->s->nr_extra_sparts;
+  const size_t Nbaryons_written = Ngas_written + Nstars_written;
+  const size_t Ndm_written =
+      Ntot_written > 0 ? Ntot_written - Nbaryons_written : 0;
+
+  /* Format things in a Gadget-friendly array */
+  long long N_total[swift_type_count] = {(long long)Ngas_written,
+                                         (long long)Ndm_written,
+                                         0,
+                                         0,
+                                         (long long)Nstars_written,
+                                         0};
 
   /* File name */
   char fileName[FILENAME_BUFFER_SIZE];
-  if (e->snapshot_label_delta == 1)
-    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
-             e->snapshot_output_count + e->snapshot_label_first);
-  else
+  if (e->snapshot_int_time_label_on)
     snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%06i.hdf5", baseName,
-             e->snapshot_output_count * e->snapshot_label_delta +
-                 e->snapshot_label_first);
+             (int)round(e->time));
+  else
+    snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
+             e->snapshot_output_count);
 
   /* First time, we need to create the XMF file */
   if (e->snapshot_output_count == 0) xmf_create_file(baseName);
@@ -651,28 +712,25 @@ void write_output_single(struct engine* e, const char* baseName,
   h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
   if (h_file < 0) error("Error while opening file '%s'.", fileName);
 
-  /* Open header to write simulation properties */
-  /* message("Writing runtime parameters..."); */
-  h_grp =
-      H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-  if (h_grp < 0) error("Error while creating runtime parameters group\n");
-
-  /* Write the relevant information */
-  io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);
-
-  /* Close runtime parameters */
-  H5Gclose(h_grp);
-
   /* Open header to write simulation properties */
   /* message("Writing file header..."); */
   h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating file header\n");
 
+  /* Convert basic output information to snapshot units */
+  const double factor_time =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_TIME);
+  const double factor_length =
+      units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_LENGTH);
+  const double dblTime = e->time * factor_time;
+  const double dim[3] = {e->s->dim[0] * factor_length,
+                         e->s->dim[1] * factor_length,
+                         e->s->dim[2] * factor_length};
+
   /* Print the relevant information and print status */
-  io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
-  double dblTime = e->time;
+  io_write_attribute(h_grp, "BoxSize", DOUBLE, dim, 3);
   io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
-  int dimension = (int)hydro_dimension;
+  const int dimension = (int)hydro_dimension;
   io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
   io_write_attribute(h_grp, "Redshift", DOUBLE, &e->cosmology->z, 1);
   io_write_attribute(h_grp, "Scale-factor", DOUBLE, &e->cosmology->a, 1);
@@ -725,8 +783,10 @@ void write_output_single(struct engine* e, const char* baseName,
   h_grp = H5Gcreate(h_file, "/SubgridScheme", H5P_DEFAULT, H5P_DEFAULT,
                     H5P_DEFAULT);
   if (h_grp < 0) error("Error while creating subgrid group");
-  cooling_write_flavour(h_grp);
+  entropy_floor_write_flavour(h_grp);
+  cooling_write_flavour(h_grp, e->cooling_func);
   chemistry_write_flavour(h_grp);
+  tracers_write_flavour(h_grp);
   H5Gclose(h_grp);
 
   /* Print the gravity parameters */
@@ -738,6 +798,15 @@ void write_output_single(struct engine* e, const char* baseName,
     H5Gclose(h_grp);
   }
 
+  /* Print the stellar parameters */
+  if (e->policy & engine_policy_stars) {
+    h_grp = H5Gcreate(h_file, "/StarsScheme", H5P_DEFAULT, H5P_DEFAULT,
+                      H5P_DEFAULT);
+    if (h_grp < 0) error("Error while creating stars group");
+    stars_props_print_snapshot(h_grp, e->stars_properties);
+    H5Gclose(h_grp);
+  }
+
   /* Print the cosmological model  */
   h_grp =
       H5Gcreate(h_file, "/Cosmology", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -769,6 +838,17 @@ void write_output_single(struct engine* e, const char* baseName,
   /* Print the system of Units used internally */
   io_write_unit_system(h_file, internal_units, "InternalCodeUnits");
 
+  /* Now write the top-level cell structure */
+  long long global_offsets[swift_type_count] = {0};
+  h_grp = H5Gcreate(h_file, "/Cells", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+  if (h_grp < 0) error("Error while creating cells group");
+
+  /* Write the location of the particles in the arrays */
+  io_write_cell_offsets(h_grp, e->s->cdim, e->s->cells_top, e->s->nr_cells,
+                        e->s->width, e->nodeID, N_total, global_offsets,
+                        internal_units, snapshot_units);
+  H5Gclose(h_grp);
+
   /* Tell the user if a conversion will be needed */
   if (e->verbose) {
     if (units_are_equal(snapshot_units, internal_units)) {
@@ -824,36 +904,154 @@ void write_output_single(struct engine* e, const char* baseName,
     struct io_props list[100];
     size_t N = 0;
 
+    struct part* parts_written = NULL;
+    struct xpart* xparts_written = NULL;
+    struct gpart* gparts_written = NULL;
+    struct velociraptor_gpart_data* gpart_group_data_written = NULL;
+    struct spart* sparts_written = NULL;
+
     /* Write particle fields from the particle structure */
     switch (ptype) {
 
-      case swift_type_gas:
-        N = Ngas;
-        hydro_write_particles(parts, xparts, list, &num_fields);
-        num_fields += chemistry_write_particles(parts, list + num_fields);
-        num_fields +=
-            cooling_write_particles(xparts, list + num_fields, cooling);
-        break;
-
-      case swift_type_dark_matter:
-        /* Allocate temporary array */
-        if (posix_memalign((void**)&dmparts, gpart_align,
-                           Ndm * sizeof(struct gpart)) != 0)
-          error("Error while allocating temporary memory for DM particles");
-        bzero(dmparts, Ndm * sizeof(struct gpart));
-
-        /* Collect the DM particles from gpart */
-        io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
-
-        /* Write DM particles */
-        N = Ndm;
-        darkmatter_write_particles(dmparts, list, &num_fields);
-        break;
-
-      case swift_type_star:
-        N = Nstars;
-        star_write_particles(sparts, list, &num_fields);
-        break;
+      case swift_type_gas: {
+        if (Ngas == Ngas_written) {
+
+          /* No inhibted particles: easy case */
+          N = Ngas;
+          hydro_write_particles(parts, xparts, list, &num_fields);
+          num_fields += chemistry_write_particles(parts, list + num_fields);
+          if (with_cooling || with_temperature) {
+            num_fields += cooling_write_particles(
+                parts, xparts, list + num_fields, e->cooling_func);
+          }
+          if (with_stf) {
+            num_fields +=
+                velociraptor_write_parts(parts, xparts, list + num_fields);
+          }
+          num_fields += tracers_write_particles(
+              parts, xparts, list + num_fields, with_cosmology);
+          num_fields +=
+              star_formation_write_particles(parts, xparts, list + num_fields);
+
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Ngas_written;
+
+          /* Allocate temporary arrays */
+          if (posix_memalign((void**)&parts_written, part_align,
+                             Ngas_written * sizeof(struct part)) != 0)
+            error("Error while allocating temporart memory for parts");
+          if (posix_memalign((void**)&xparts_written, xpart_align,
+                             Ngas_written * sizeof(struct xpart)) != 0)
+            error("Error while allocating temporart memory for xparts");
+
+          /* Collect the particles we want to write */
+          io_collect_parts_to_write(parts, xparts, parts_written,
+                                    xparts_written, Ngas, Ngas_written);
+
+          /* Select the fields to write */
+          hydro_write_particles(parts_written, xparts_written, list,
+                                &num_fields);
+          num_fields +=
+              chemistry_write_particles(parts_written, list + num_fields);
+          if (with_cooling || with_temperature) {
+            num_fields +=
+                cooling_write_particles(parts_written, xparts_written,
+                                        list + num_fields, e->cooling_func);
+          }
+          if (with_stf) {
+            num_fields += velociraptor_write_parts(
+                parts_written, xparts_written, list + num_fields);
+          }
+          num_fields += tracers_write_particles(
+              parts_written, xparts_written, list + num_fields, with_cosmology);
+          num_fields += star_formation_write_particles(
+              parts_written, xparts_written, list + num_fields);
+        }
+      } break;
+
+      case swift_type_dark_matter: {
+        if (Ntot == Ndm_written) {
+
+          /* This is a DM-only run without inhibited particles */
+          N = Ntot;
+          darkmatter_write_particles(gparts, list, &num_fields);
+          if (with_stf) {
+            num_fields += velociraptor_write_gparts(e->s->gpart_group_data,
+                                                    list + num_fields);
+          }
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Ndm_written;
+
+          /* Allocate temporary array */
+          if (posix_memalign((void**)&gparts_written, gpart_align,
+                             Ndm_written * sizeof(struct gpart)) != 0)
+            error("Error while allocating temporart memory for gparts");
+
+          if (with_stf) {
+            if (posix_memalign(
+                    (void**)&gpart_group_data_written, gpart_align,
+                    Ndm_written * sizeof(struct velociraptor_gpart_data)) != 0)
+              error(
+                  "Error while allocating temporart memory for gparts STF "
+                  "data");
+          }
+
+          /* Collect the non-inhibited DM particles from gpart */
+          io_collect_gparts_to_write(gparts, e->s->gpart_group_data,
+                                     gparts_written, gpart_group_data_written,
+                                     Ntot, Ndm_written, with_stf);
+
+          /* Select the fields to write */
+          darkmatter_write_particles(gparts_written, list, &num_fields);
+          if (with_stf) {
+            num_fields += velociraptor_write_gparts(gpart_group_data_written,
+                                                    list + num_fields);
+          }
+        }
+      } break;
+
+      case swift_type_stars: {
+        if (Nstars == Nstars_written) {
+
+          /* No inhibted particles: easy case */
+          N = Nstars;
+          stars_write_particles(sparts, list, &num_fields);
+          num_fields += chemistry_write_sparticles(sparts, list + num_fields);
+          num_fields += tracers_write_sparticles(sparts, list + num_fields,
+                                                 with_cosmology);
+          if (with_stf) {
+            num_fields += velociraptor_write_sparts(sparts, list + num_fields);
+          }
+        } else {
+
+          /* Ok, we need to fish out the particles we want */
+          N = Nstars_written;
+
+          /* Allocate temporary arrays */
+          if (posix_memalign((void**)&sparts_written, spart_align,
+                             Nstars_written * sizeof(struct spart)) != 0)
+            error("Error while allocating temporart memory for sparts");
+
+          /* Collect the particles we want to write */
+          io_collect_sparts_to_write(sparts, sparts_written, Nstars,
+                                     Nstars_written);
+
+          /* Select the fields to write */
+          stars_write_particles(sparts_written, list, &num_fields);
+          num_fields +=
+              chemistry_write_sparticles(sparts_written, list + num_fields);
+          num_fields += tracers_write_sparticles(
+              sparts_written, list + num_fields, with_cosmology);
+          if (with_stf) {
+            num_fields +=
+                velociraptor_write_sparts(sparts_written, list + num_fields);
+          }
+        }
+      } break;
 
       default:
         error("Particle Type %d not yet supported. Aborting", ptype);
@@ -873,11 +1071,12 @@ void write_output_single(struct engine* e, const char* baseName,
                    internal_units, snapshot_units);
     }
 
-    /* Free temporary array */
-    if (dmparts) {
-      free(dmparts);
-      dmparts = NULL;
-    }
+    /* Free temporary arrays */
+    if (parts_written) free(parts_written);
+    if (xparts_written) free(xparts_written);
+    if (gparts_written) free(gparts_written);
+    if (gpart_group_data_written) free(gpart_group_data_written);
+    if (sparts_written) free(sparts_written);
 
     /* Close particle group */
     H5Gclose(h_grp);
diff --git a/src/single_io.h b/src/single_io.h
index a0ce8370dfa1009f28e7c399b3f1db345c23de49..62285c3da210243e76347f33780146604673656f 100644
--- a/src/single_io.h
+++ b/src/single_io.h
@@ -26,6 +26,7 @@
 
 /* Includes. */
 #include "engine.h"
+#include "io_properties.h"
 #include "part.h"
 #include "units.h"
 
@@ -33,15 +34,21 @@ void read_ic_single(const char* fileName,
                     const struct unit_system* internal_units, double dim[3],
                     struct part** parts, struct gpart** gparts,
                     struct spart** sparts, size_t* Ngas, size_t* Ndm,
-                    size_t* Nstars, int* periodic, int* flag_entropy,
-                    int with_hydro, int with_gravity, int with_stars,
-                    int cleanup_h, int cleanup_sqrt_a, double h, double a,
-                    int nr_threads, int dry_run);
+                    size_t* Nstars, int* flag_entropy, int with_hydro,
+                    int with_gravity, int with_stars, int cleanup_h,
+                    int cleanup_sqrt_a, double h, double a, int nr_threads,
+                    int dry_run);
 
 void write_output_single(struct engine* e, const char* baseName,
                          const struct unit_system* internal_units,
                          const struct unit_system* snapshot_units);
 
+void writeArray(const struct engine* e, hid_t grp, char* fileName,
+                FILE* xmfFile, char* partTypeGroupName,
+                const struct io_props props, size_t N,
+                const struct unit_system* internal_units,
+                const struct unit_system* snapshot_units);
+
 #endif /* HAVE_HDF5 && !WITH_MPI */
 
 #endif /* SWIFT_SINGLE_IO_H */
diff --git a/src/sourceterms.c b/src/sourceterms.c
deleted file mode 100644
index 993045e61503e4e78b855816921bc057706b76d1..0000000000000000000000000000000000000000
--- a/src/sourceterms.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-
-/* Config parameters. */
-#include "../config.h"
-
-/* Local includes. */
-#include "const.h"
-#include "hydro.h"
-#include "parser.h"
-#include "units.h"
-
-/* This object's header. */
-#include "sourceterms.h"
-
-/**
- * @brief Initialises the sourceterms
- *
- * @param parameter_file The parsed parameter file
- * @param us The current internal system of units
- * @param source the structure that has all the source term properties
- */
-void sourceterms_init(struct swift_params *parameter_file,
-                      struct unit_system *us, struct sourceterms *source) {
-#ifdef SOURCETERMS_SN_FEEDBACK
-  supernova_init(parameter_file, us, source);
-#endif /* SOURCETERMS_SN_FEEDBACK */
-};
-
-/**
- * @brief Prints the properties of the source terms to stdout
- * @param source the structure that has all the source term properties
- */
-void sourceterms_print(struct sourceterms *source) {
-#ifdef SOURCETERMS_NONE
-  error(" no sourceterms defined yet you ran with -F");
-#ifdef SOURCETERMS_SN_FEEDBACK
-#error "can't have sourceterms when defined SOURCETERMS_NONE"
-#endif
-#endif
-#ifdef SOURCETERMS_SN_FEEDBACK
-  supernova_print(source);
-#endif /* SOURCETERMS_SN_FEEDBACK */
-};
-
-/**
- * @brief Write a sourceterms struct to the given FILE as a stream of bytes.
- *
- * @param sourceterms the struct
- * @param stream the file stream
- */
-void sourceterms_struct_dump(const struct sourceterms *sourceterms,
-                             FILE *stream) {
-  restart_write_blocks((void *)sourceterms, sizeof(struct sourceterms), 1,
-                       stream, "sourceterms", "sourceterms");
-}
-
-/**
- * @brief Restore a sourceterms struct from the given FILE as a stream of
- * bytes.
- *
- * @param sourceterms the struct
- * @param stream the file stream
- */
-void sourceterms_struct_restore(const struct sourceterms *sourceterms,
-                                FILE *stream) {
-  restart_read_blocks((void *)sourceterms, sizeof(struct sourceterms), 1,
-                      stream, NULL, "sourceterms");
-}
diff --git a/src/sourceterms.h b/src/sourceterms.h
deleted file mode 100644
index 407d2f19362531a3fd3537889593c484319919b5..0000000000000000000000000000000000000000
--- a/src/sourceterms.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Coypright (c) 2015 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-#ifndef SWIFT_SOURCETERMS_H
-#define SWIFT_SOURCETERMS_H
-
-/**
- * @file src/sourceterms.h
- * @brief Branches between the different sourceterms functions.
- */
-
-#include "./const.h"
-#include "runner.h"
-
-#ifdef SOURCETERMS_SN_FEEDBACK
-#include "sourceterms/sn_feedback/sn_feedback_struct.h"
-#endif
-
-/* So far only one model here */
-struct sourceterms {
-#ifdef SOURCETERMS_SN_FEEDBACK
-  struct supernova_struct supernova;
-#endif
-};
-#ifdef SOURCETERMS_SN_FEEDBACK
-#include "sourceterms/sn_feedback/sn_feedback.h"
-#endif
-
-void sourceterms_init(struct swift_params* parameter_file,
-                      struct unit_system* us, struct sourceterms* source);
-void sourceterms_print(struct sourceterms* source);
-
-/* Dump/restore. */
-void sourceterms_struct_dump(const struct sourceterms* source, FILE* stream);
-void sourceterms_struct_restore(const struct sourceterms* source, FILE* stream);
-
-/**
- * @brief Routines related to source terms
- * @param cell_min: corner of cell to test
- * @param cell_width: width of cell to test
- * @param sourceterms: properties of source terms to test
- * @param dimen: dimensionality of the problem
- *
- * This routine tests whether a source term should be applied to this cell
- * return: 1 if yes, return: 0 if no
- */
-
-__attribute__((always_inline)) INLINE static int sourceterms_test_cell(
-    const double cell_min[], const double cell_width[],
-    struct sourceterms* sourceterms, const int dimen) {
-#ifdef SOURCETERMS_SN_FEEDBACK
-  return supernova_feedback_test_cell(cell_min, cell_width, sourceterms, dimen);
-#endif
-  return 0;
-};
-
-__attribute__((always_inline)) INLINE static void sourceterms_apply(
-    struct runner* r, struct sourceterms* sourceterms, struct cell* c) {
-#ifdef SOURCETERMS_SN_FEEDBACK
-  supernova_feedback_apply(r, sourceterms, c);
-#endif
-};
-#endif /*  SWIFT_SOURCETERMS_H */
diff --git a/src/sourceterms/sn_feedback/sn_feedback.h b/src/sourceterms/sn_feedback/sn_feedback.h
deleted file mode 100644
index 411673c37e82ff89d906425d1cadaa135c46a38d..0000000000000000000000000000000000000000
--- a/src/sourceterms/sn_feedback/sn_feedback.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-#ifndef SWIFT_SN_FEEDBACK_H
-#define SWIFT_SN_FEEDBACK_H
-#include <float.h>
-/* Config parameters. */
-#include "../config.h"
-
-#include "engine.h"
-#include "equation_of_state.h"
-#include "hydro.h"
-#include "runner.h"
-#include "timestep.h"
-
-/**
- * @file src/sourceterms/sn_feedback.h
- *
- * @brief Routines related to sourceterms (supernova feedback): determine if
- * feedback occurs in this cell
- *
- * @param cell_min: corner of cell to test
- * @param cell_width: width of cell to test
- * @param sourceterms: properties of source terms to test
- * @param dimen: dimensionality of the problem
- *
- * This routine tests whether a source term should be applied to this cell
- * return: 1 if yes, return: 0 if no
- */
-__attribute__((always_inline)) INLINE static int supernova_feedback_test_cell(
-    const double cell_min[], const double cell_width[],
-    struct sourceterms* sourceterms, const int dimen) {
-  if (sourceterms->supernova.status == supernova_is_done) return 0;
-
-  const double location[3] = {sourceterms->supernova.x,
-                              sourceterms->supernova.y,
-                              sourceterms->supernova.z};
-  for (int i = 0; i < dimen; i++) {
-    if (cell_min[i] > location[i]) return 0;
-    if ((cell_min[i] + cell_width[i]) <= location[i]) return 0;
-  };
-  return 1;
-};
-
-/**
- * @file src/sourceterms/sn_feedback.h
- *
- * @brief Routines related to source terms (supernova feedback): perform
- * feedback in this cell
- * @param r: the runner
- * @param sourceterms the structure describing the source terms properties
- * @param c the cell to apply feedback to
- *
- * This routine heats an individual particle (p), increasing its thermal energy
- * per unit mass
- *      by supernova energy / particle mass.
- */
-__attribute__((always_inline)) INLINE static void supernova_feedback_apply(
-    struct runner* restrict r, struct sourceterms* restrict sourceterms,
-    struct cell* restrict c) {
-
-  const int count = c->count;
-  struct part* restrict parts = c->parts;
-  struct xpart* restrict xparts = c->xparts;
-  const double timeBase = r->e->timeBase;
-  const int ti_current = r->e->ti_current;
-
-  /* inject SN energy into the particle with highest id in this cell if it is
-   * active */
-  int imax = 0;
-  struct part* restrict p_sn = NULL;
-  struct xpart* restrict xp_sn = NULL;
-
-  for (int i = 0; i < count; i++) {
-
-    /* Get a direct pointer on the part. */
-    struct part* restrict p = &parts[i];
-    if (p->id > imax) {
-      imax = p->id;
-      p_sn = p;
-      xp_sn = &xparts[i];
-    }
-  }
-
-  /* Is this part within the time step? */
-  if (p_sn->ti_begin == ti_current) {
-
-    /* Does this time step straddle the feedback injection time? */
-    const float t_begin = p_sn->ti_begin * timeBase;
-    const float t_end = p_sn->ti_end * timeBase;
-    if (t_begin <= sourceterms->supernova.time &&
-        t_end > sourceterms->supernova.time) {
-
-      /* store old time step */
-      const int dti_old = p_sn->ti_end - p_sn->ti_begin;
-
-      /* add supernova feedback */
-      const float u_old = hydro_get_internal_energy(p_sn, 0);
-      const float ent_old = hydro_get_entropy(p_sn, 0.0);
-      const float u_new =
-          u_old + sourceterms->supernova.energy / hydro_get_mass(p_sn);
-      hydro_set_internal_energy(p_sn, u_new);
-      const float u_set = hydro_get_internal_energy(p_sn, 0.0);
-      const float ent_set = hydro_get_entropy(p_sn, 0.0);
-      message(
-          " applied super nova, time = %e, location= %e %e %e velocity= %e %e "
-          "%e",
-          ti_current * timeBase, p_sn->x[0], p_sn->x[1], p_sn->x[2], p_sn->v[0],
-          p_sn->v[1], p_sn->v[2]);
-      message(
-          " injected SN energy in particle = %lld, increased energy from %e to "
-          "%e and is notw %e, entropy from %e to %e",
-          p_sn->id, u_old, u_new, u_set, ent_old, ent_set);
-
-      /* label supernova as done */
-      sourceterms->supernova.status = supernova_is_done;
-
-      /* update timestep if new time step shorter than old time step */
-      const int dti = get_part_timestep(p_sn, xp_sn, r->e);
-      if (dti < dti_old) {
-        p_sn->ti_end = p_sn->ti_begin + dti;
-        message(" changed timestep from %d to %d", dti_old, dti);
-
-        /* apply simple time-step limiter on all particles in same cell:
-         */
-        int i_limit = 0;
-        for (int i = 0; i < count; i++) {
-          struct part* restrict p = &parts[i];
-          const int dti_old = p->ti_end - p->ti_begin;
-          if (dti_old > 2 * dti) {
-            i_limit++;
-            const int dti_new = 2 * dti;
-            p->ti_end = p->ti_begin + dti_new;
-            message(" old step = %d new step = %d", dti_old, dti_new);
-          } else
-            message(" old step = %d", dti_old);
-        }
-        message(" count= %d limited timestep of %d particles ", count, i_limit);
-      } /* end of limiter */
-      error("end");
-    }
-  }
-};
-
-/**
- * @file src/sourceterms/sn_feedback.h
- *
- * @brief Routine to initialise supernova feedback
- * @param parameterfile: the parse parmeter file
- * @param us: the unit system in use
- * @param sourceterms the structure describing the source terms properties
- *
- * This routine heats an individual particle (p), increasing its thermal energy
- * per unit mass
- *      by supernova energy / particle mass.
- */
-
-__attribute__((always_inline)) INLINE static void supernova_init(
-    struct swift_params* parameter_file, struct unit_system* us,
-    struct sourceterms* source) {
-  source->supernova.time = parser_get_param_double(parameter_file, "SN:time");
-  source->supernova.energy =
-      parser_get_param_double(parameter_file, "SN:energy");
-  source->supernova.x = parser_get_param_double(parameter_file, "SN:x");
-  source->supernova.y = parser_get_param_double(parameter_file, "SN:y");
-  source->supernova.z = parser_get_param_double(parameter_file, "SN:z");
-  source->supernova.status = supernova_is_not_done;
-}
-__attribute__((always_inline)) INLINE static void supernova_print(
-    struct sourceterms* source) {
-  message(
-      " Single SNe of energy= %e will explode at time= %e at location "
-      "(%e,%e,%e)",
-      source->supernova.energy, source->supernova.time, source->supernova.x,
-      source->supernova.y, source->supernova.z);
-}
-#endif /* SWIFT_SN_FEEDBACK_H */
diff --git a/src/sourceterms/sn_feedback/sn_feedback_struct.h b/src/sourceterms/sn_feedback/sn_feedback_struct.h
deleted file mode 100644
index dd1842a6717c6c5a20352324cbe6b018c73e7b3e..0000000000000000000000000000000000000000
--- a/src/sourceterms/sn_feedback/sn_feedback_struct.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Copyright (c) 2016 Tom Theuns (tom.theuns@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-/**
- * @file src/sourceterms/sn_feedback_struct.h
- * @brief Routines related to source terms (feedback)
- *
- * enumeration type that sets if supernova explosion is done (is_done) or still
- * needs doing (is_not_done)
- */
-#ifndef SWIFT_SN_FEEDBACK_STRUCT_H
-#define SWIFT_SN_FEEDBACK_STRUCT_H
-enum supernova_status { supernova_is_done, supernova_is_not_done };
-
-/**
- * @file src/sourceterms/sn_feedback_struct.h
- * @brief Routines related to source terms (feedback)
- *
- * The structure that describes the source term (supernova feedback)
- * It specifies the time, energy and location of the desired supernova
- * explosion, and a status (supernova_is_done/supernova_is_not_done)
- * that records the status of the supernova
- */
-struct supernova_struct {
-  double time;
-  double energy;
-  double x, y, z;
-  enum supernova_status status;
-};
-#endif /* SWIFT_SN_FEEDBACK_STRUCT_H */
diff --git a/src/space.c b/src/space.c
index 31fa3b94052c9def9d529b2a3f12f83b046f354b..0b7aec7a59eae21cfd9f74395772ac1f9581950e 100644
--- a/src/space.c
+++ b/src/space.c
@@ -57,9 +57,11 @@
 #include "multipole.h"
 #include "restart.h"
 #include "sort_part.h"
+#include "star_formation.h"
 #include "stars.h"
 #include "threadpool.h"
 #include "tools.h"
+#include "tracers.h"
 
 /* Split size. */
 int space_splitsize = space_splitsize_default;
@@ -67,8 +69,20 @@ int space_subsize_pair_hydro = space_subsize_pair_hydro_default;
 int space_subsize_self_hydro = space_subsize_self_hydro_default;
 int space_subsize_pair_grav = space_subsize_pair_grav_default;
 int space_subsize_self_grav = space_subsize_self_grav_default;
-int space_subdepth_grav = space_subdepth_grav_default;
+int space_subdepth_diff_grav = space_subdepth_diff_grav_default;
 int space_maxsize = space_maxsize_default;
+
+/*! Number of extra #part we allocate memory for per top-level cell */
+int space_extra_parts = space_extra_parts_default;
+
+/*! Number of extra #spart we allocate memory for per top-level cell */
+int space_extra_sparts = space_extra_sparts_default;
+
+/*! Number of extra #gpart we allocate memory for per top-level cell */
+int space_extra_gparts = space_extra_gparts_default;
+
+/*! Expected maximal number of strays received at a rebuild */
+int space_expected_max_nr_strays = space_expected_max_nr_strays_default;
 #ifdef SWIFT_DEBUG_CHECKS
 int last_cell_id;
 #endif
@@ -101,9 +115,14 @@ struct parallel_sort {
  */
 struct index_data {
   struct space *s;
-  struct cell *cells;
   int *ind;
   int *cell_counts;
+  size_t count_inhibited_part;
+  size_t count_inhibited_gpart;
+  size_t count_inhibited_spart;
+  size_t count_extra_part;
+  size_t count_extra_gpart;
+  size_t count_extra_spart;
 };
 
 /**
@@ -133,16 +152,16 @@ void space_rebuild_recycle_rec(struct space *s, struct cell *c,
         c->progeny[k]->next = *cell_rec_begin;
         *cell_rec_begin = c->progeny[k];
 
-        if (s->gravity) {
-          c->progeny[k]->multipole->next = *multipole_rec_begin;
-          *multipole_rec_begin = c->progeny[k]->multipole;
+        if (s->with_self_gravity) {
+          c->progeny[k]->grav.multipole->next = *multipole_rec_begin;
+          *multipole_rec_begin = c->progeny[k]->grav.multipole;
         }
 
         if (*cell_rec_end == NULL) *cell_rec_end = *cell_rec_begin;
-        if (s->gravity && *multipole_rec_end == NULL)
+        if (s->with_self_gravity && *multipole_rec_end == NULL)
           *multipole_rec_end = *multipole_rec_begin;
 
-        c->progeny[k]->multipole = NULL;
+        c->progeny[k]->grav.multipole = NULL;
         c->progeny[k] = NULL;
       }
 }
@@ -163,66 +182,113 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
     if (cell_rec_begin != NULL)
       space_recycle_list(s, cell_rec_begin, cell_rec_end, multipole_rec_begin,
                          multipole_rec_end);
-    c->sorts = NULL;
+    c->hydro.sorts = NULL;
+    c->stars.sorts = NULL;
     c->nr_tasks = 0;
-    c->density = NULL;
-    c->gradient = NULL;
-    c->force = NULL;
-    c->grav = NULL;
-    c->dx_max_part = 0.0f;
-    c->dx_max_sort = 0.0f;
-    c->sorted = 0;
-    c->count = 0;
-    c->gcount = 0;
-    c->scount = 0;
-    c->init_grav = NULL;
-    c->init_grav_out = NULL;
-    c->extra_ghost = NULL;
-    c->ghost_in = NULL;
-    c->ghost_out = NULL;
-    c->ghost = NULL;
+    c->grav.nr_mm_tasks = 0;
+    c->hydro.density = NULL;
+    c->hydro.gradient = NULL;
+    c->hydro.force = NULL;
+    c->hydro.limiter = NULL;
+    c->grav.grav = NULL;
+    c->grav.mm = NULL;
+    c->hydro.dx_max_part = 0.0f;
+    c->hydro.dx_max_sort = 0.0f;
+    c->stars.dx_max_part = 0.f;
+    c->stars.dx_max_sort = 0.f;
+    c->hydro.sorted = 0;
+    c->stars.sorted = 0;
+    c->hydro.count = 0;
+    c->hydro.count_total = 0;
+    c->hydro.updated = 0;
+    c->hydro.inhibited = 0;
+    c->grav.count = 0;
+    c->grav.count_total = 0;
+    c->grav.updated = 0;
+    c->grav.inhibited = 0;
+    c->stars.count = 0;
+    c->stars.count_total = 0;
+    c->stars.updated = 0;
+    c->stars.inhibited = 0;
+    c->grav.init = NULL;
+    c->grav.init_out = NULL;
+    c->hydro.extra_ghost = NULL;
+    c->hydro.ghost_in = NULL;
+    c->hydro.ghost_out = NULL;
+    c->hydro.ghost = NULL;
+    c->stars.ghost = NULL;
+    c->stars.density = NULL;
+    c->stars.feedback = NULL;
     c->kick1 = NULL;
     c->kick2 = NULL;
     c->timestep = NULL;
-    c->end_force = NULL;
-    c->drift_part = NULL;
-    c->drift_gpart = NULL;
-    c->cooling = NULL;
-    c->sourceterms = NULL;
-    c->grav_long_range = NULL;
-    c->grav_down_in = NULL;
-    c->grav_down = NULL;
-    c->grav_mesh = NULL;
+    c->timestep_limiter = NULL;
+    c->hydro.end_force = NULL;
+    c->hydro.drift = NULL;
+    c->stars.drift = NULL;
+    c->stars.stars_in = NULL;
+    c->stars.stars_out = NULL;
+    c->grav.drift = NULL;
+    c->grav.drift_out = NULL;
+    c->hydro.cooling = NULL;
+    c->grav.long_range = NULL;
+    c->grav.down_in = NULL;
+    c->grav.down = NULL;
+    c->grav.mesh = NULL;
+    c->grav.end_force = NULL;
     c->super = c;
-    c->super_hydro = c;
-    c->super_gravity = c;
-    c->parts = NULL;
-    c->xparts = NULL;
-    c->gparts = NULL;
-    c->sparts = NULL;
-    c->do_sub_sort = 0;
-    c->do_grav_sub_drift = 0;
-    c->do_sub_drift = 0;
-    if (s->gravity) bzero(c->multipole, sizeof(struct gravity_tensors));
-    for (int i = 0; i < 13; i++)
-      if (c->sort[i] != NULL) {
-        free(c->sort[i]);
-        c->sort[i] = NULL;
+    c->hydro.super = c;
+    c->grav.super = c;
+    c->hydro.parts = NULL;
+    c->hydro.xparts = NULL;
+    c->grav.parts = NULL;
+    c->stars.parts = NULL;
+    c->hydro.do_sub_sort = 0;
+    c->stars.do_sub_sort = 0;
+    c->hydro.do_sub_drift = 0;
+    c->grav.do_sub_drift = 0;
+    c->stars.do_sub_drift = 0;
+    c->hydro.do_sub_limiter = 0;
+    c->hydro.do_limiter = 0;
+    c->hydro.ti_end_min = -1;
+    c->hydro.ti_end_max = -1;
+    c->grav.ti_end_min = -1;
+    c->grav.ti_end_max = -1;
+    c->stars.ti_end_min = -1;
+    c->stars.ti_end_max = -1;
+#ifdef SWIFT_DEBUG_CHECKS
+    c->cellID = 0;
+#endif
+    if (s->with_self_gravity)
+      bzero(c->grav.multipole, sizeof(struct gravity_tensors));
+    for (int i = 0; i < 13; i++) {
+      if (c->hydro.sort[i] != NULL) {
+        free(c->hydro.sort[i]);
+        c->hydro.sort[i] = NULL;
+      }
+      if (c->stars.sort[i] != NULL) {
+        free(c->stars.sort[i]);
+        c->stars.sort[i] = NULL;
       }
+    }
 #if WITH_MPI
-    c->tag = -1;
-
-    c->recv_xv = NULL;
-    c->recv_rho = NULL;
-    c->recv_gradient = NULL;
-    c->recv_grav = NULL;
-    c->recv_ti = NULL;
-
-    c->send_xv = NULL;
-    c->send_rho = NULL;
-    c->send_gradient = NULL;
-    c->send_grav = NULL;
-    c->send_ti = NULL;
+    c->mpi.tag = -1;
+
+    c->mpi.hydro.recv_xv = NULL;
+    c->mpi.hydro.recv_rho = NULL;
+    c->mpi.hydro.recv_gradient = NULL;
+    c->mpi.grav.recv = NULL;
+    c->mpi.stars.recv = NULL;
+    c->mpi.recv_ti = NULL;
+    c->mpi.limiter.recv = NULL;
+
+    c->mpi.hydro.send_xv = NULL;
+    c->mpi.hydro.send_rho = NULL;
+    c->mpi.hydro.send_gradient = NULL;
+    c->mpi.grav.send = NULL;
+    c->mpi.stars.send = NULL;
+    c->mpi.send_ti = NULL;
+    c->mpi.limiter.send = NULL;
 #endif
   }
 }
@@ -231,9 +297,16 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
  * @brief Free up any allocated cells.
  */
 void space_free_cells(struct space *s) {
+
+  ticks tic = getticks();
+
   threadpool_map(&s->e->threadpool, space_rebuild_recycle_mapper, s->cells_top,
                  s->nr_cells, sizeof(struct cell), 0, s);
   s->maxdepth = 0;
+
+  if (s->e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 /**
@@ -245,6 +318,7 @@ void space_free_cells(struct space *s) {
 void space_regrid(struct space *s, int verbose) {
 
   const size_t nr_parts = s->nr_parts;
+  const size_t nr_sparts = s->nr_sparts;
   const ticks tic = getticks();
   const integertime_t ti_current = (s->e != NULL) ? s->e->ti_current : 0;
 
@@ -252,17 +326,40 @@ void space_regrid(struct space *s, int verbose) {
   // tic = getticks();
   float h_max = s->cell_min / kernel_gamma / space_stretch;
   if (nr_parts > 0) {
-    if (s->cells_top != NULL) {
+
+    /* Can we use the list of local non-empty top-level cells? */
+    if (s->local_cells_with_particles_top != NULL) {
+      for (int k = 0; k < s->nr_local_cells_with_particles; ++k) {
+        const struct cell *c =
+            &s->cells_top[s->local_cells_with_particles_top[k]];
+        if (c->hydro.h_max > h_max) {
+          h_max = c->hydro.h_max;
+        }
+        if (c->stars.h_max > h_max) {
+          h_max = c->stars.h_max;
+        }
+      }
+
+      /* Can we instead use all the top-level cells? */
+    } else if (s->cells_top != NULL) {
       for (int k = 0; k < s->nr_cells; k++) {
-        if (s->cells_top[k].nodeID == engine_rank &&
-            s->cells_top[k].h_max > h_max) {
-          h_max = s->cells_top[k].h_max;
+        const struct cell *c = &s->cells_top[k];
+        if (c->nodeID == engine_rank && c->hydro.h_max > h_max) {
+          h_max = c->hydro.h_max;
+        }
+        if (c->nodeID == engine_rank && c->stars.h_max > h_max) {
+          h_max = c->stars.h_max;
         }
       }
+
+      /* Last option: run through the particles */
     } else {
       for (size_t k = 0; k < nr_parts; k++) {
         if (s->parts[k].h > h_max) h_max = s->parts[k].h;
       }
+      for (size_t k = 0; k < nr_sparts; k++) {
+        if (s->sparts[k].h > h_max) h_max = s->sparts[k].h;
+      }
     }
   }
 
@@ -335,7 +432,7 @@ void space_regrid(struct space *s, int verbose) {
 
   /* Are we about to allocate new top level cells without a regrid?
    * Can happen when restarting the application. */
-  int no_regrid = (s->cells_top == NULL && oldnodeIDs == NULL);
+  const int no_regrid = (s->cells_top == NULL && oldnodeIDs == NULL);
 #endif
 
   /* Do we need to re-build the upper-level cells? */
@@ -352,7 +449,10 @@ void space_regrid(struct space *s, int verbose) {
     /* Free the old cells, if they were allocated. */
     if (s->cells_top != NULL) {
       space_free_cells(s);
+      free(s->local_cells_with_tasks_top);
       free(s->local_cells_top);
+      free(s->cells_with_particles_top);
+      free(s->local_cells_with_particles_top);
       free(s->cells_top);
       free(s->multipoles_top);
     }
@@ -379,7 +479,7 @@ void space_regrid(struct space *s, int verbose) {
     memuse_report("cells_top", s->nr_cells * sizeof(struct cell));
 
     /* Allocate the multipoles for the top-level cells. */
-    if (s->gravity) {
+    if (s->with_self_gravity) {
       if (posix_memalign((void **)&s->multipoles_top, multipole_align,
                          s->nr_cells * sizeof(struct gravity_tensors)) != 0)
         error("Failed to allocate top-level multipoles.");
@@ -395,15 +495,35 @@ void space_regrid(struct space *s, int verbose) {
     bzero(s->local_cells_top, s->nr_cells * sizeof(int));
     memuse_report("local_cells_top", s->nr_cells * sizeof(int));
 
+    /* Allocate the indices of local cells with tasks */
+    if (posix_memalign((void **)&s->local_cells_with_tasks_top,
+                       SWIFT_STRUCT_ALIGNMENT, s->nr_cells * sizeof(int)) != 0)
+      error("Failed to allocate indices of local top-level cells with tasks.");
+    bzero(s->local_cells_with_tasks_top, s->nr_cells * sizeof(int));
+
+    /* Allocate the indices of cells with particles */
+    if (posix_memalign((void **)&s->cells_with_particles_top,
+                       SWIFT_STRUCT_ALIGNMENT, s->nr_cells * sizeof(int)) != 0)
+      error("Failed to allocate indices of top-level cells with particles.");
+    bzero(s->cells_with_particles_top, s->nr_cells * sizeof(int));
+
+    /* Allocate the indices of local cells with particles */
+    if (posix_memalign((void **)&s->local_cells_with_particles_top,
+                       SWIFT_STRUCT_ALIGNMENT, s->nr_cells * sizeof(int)) != 0)
+      error(
+          "Failed to allocate indices of local top-level cells with "
+          "particles.");
+    bzero(s->local_cells_with_particles_top, s->nr_cells * sizeof(int));
+
     /* Set the cells' locks */
     for (int k = 0; k < s->nr_cells; k++) {
-      if (lock_init(&s->cells_top[k].lock) != 0)
+      if (lock_init(&s->cells_top[k].hydro.lock) != 0)
         error("Failed to init spinlock for hydro.");
-      if (lock_init(&s->cells_top[k].glock) != 0)
+      if (lock_init(&s->cells_top[k].grav.plock) != 0)
         error("Failed to init spinlock for gravity.");
-      if (lock_init(&s->cells_top[k].mlock) != 0)
+      if (lock_init(&s->cells_top[k].grav.mlock) != 0)
         error("Failed to init spinlock for multipoles.");
-      if (lock_init(&s->cells_top[k].slock) != 0)
+      if (lock_init(&s->cells_top[k].stars.lock) != 0)
         error("Failed to init spinlock for stars.");
     }
 
@@ -421,19 +541,35 @@ void space_regrid(struct space *s, int verbose) {
           c->width[2] = s->width[2];
           c->dmin = dmin;
           c->depth = 0;
-          c->count = 0;
-          c->gcount = 0;
-          c->scount = 0;
+          c->split = 0;
+          c->hydro.count = 0;
+          c->grav.count = 0;
+          c->stars.count = 0;
           c->super = c;
-          c->super_hydro = c;
-          c->super_gravity = c;
-          c->ti_old_part = ti_current;
-          c->ti_old_gpart = ti_current;
-          c->ti_old_multipole = ti_current;
+          c->hydro.super = c;
+          c->grav.super = c;
+          c->hydro.ti_old_part = ti_current;
+          c->grav.ti_old_part = ti_current;
+          c->stars.ti_old_part = ti_current;
+          c->grav.ti_old_multipole = ti_current;
 #ifdef WITH_MPI
-          c->tag = -1;
+          c->mpi.tag = -1;
+          c->mpi.hydro.recv_xv = NULL;
+          c->mpi.hydro.recv_rho = NULL;
+          c->mpi.hydro.recv_gradient = NULL;
+          c->mpi.hydro.send_xv = NULL;
+          c->mpi.hydro.send_rho = NULL;
+          c->mpi.hydro.send_gradient = NULL;
+          c->mpi.stars.send = NULL;
+          c->mpi.stars.recv = NULL;
+          c->mpi.grav.recv = NULL;
+          c->mpi.grav.send = NULL;
 #endif  // WITH_MPI
-          if (s->gravity) c->multipole = &s->multipoles_top[cid];
+          if (s->with_self_gravity) c->grav.multipole = &s->multipoles_top[cid];
+#ifdef SWIFT_DEBUG_CHECKS
+          c->cellID = -last_cell_id;
+          last_cell_id++;
+#endif
         }
 
     /* Be verbose about the change. */
@@ -456,7 +592,7 @@ void space_regrid(struct space *s, int verbose) {
         /* Failed, try another technique that requires no settings. */
         message("Failed to get a new partition, trying less optimal method");
         struct partition initial_partition;
-#ifdef HAVE_METIS
+#if defined(HAVE_PARMETIS) || defined(HAVE_METIS)
         initial_partition.type = INITPART_METIS_NOWEIGHT;
 #else
         initial_partition.type = INITPART_VECTORIZE;
@@ -506,14 +642,349 @@ void space_regrid(struct space *s, int verbose) {
             clocks_getunit());
 }
 
+/**
+ * @brief Allocate memory for the extra particles used for on-the-fly creation.
+ *
+ * This rarely actually allocates memory. Most of the time, we convert
+ * pre-allocated memory into extra particles.
+ *
+ * This function also sets the extra particles' location to their top-level
+ * cells. They can then be sorted into their correct memory position later on.
+ *
+ * @param s The current #space.
+ * @param verbose Are we talkative?
+ */
+void space_allocate_extras(struct space *s, int verbose) {
+
+  const int local_nodeID = s->e->nodeID;
+
+  /* Anything to do here? (Abort if we don't want extras)*/
+  if (space_extra_parts == 0 && space_extra_gparts == 0 &&
+      space_extra_sparts == 0)
+    return;
+
+  /* The top-level cells */
+  const struct cell *cells = s->cells_top;
+  const double half_cell_width[3] = {0.5 * cells[0].width[0],
+                                     0.5 * cells[0].width[1],
+                                     0.5 * cells[0].width[2]};
+
+  /* The current number of particles (including spare ones) */
+  size_t nr_parts = s->nr_parts;
+  size_t nr_gparts = s->nr_gparts;
+  size_t nr_sparts = s->nr_sparts;
+
+  /* The current number of actual particles */
+  size_t nr_actual_parts = nr_parts - s->nr_extra_parts;
+  size_t nr_actual_gparts = nr_gparts - s->nr_extra_gparts;
+  size_t nr_actual_sparts = nr_sparts - s->nr_extra_sparts;
+
+  /* The number of particles we allocated memory for (MPI overhead) */
+  size_t size_parts = s->size_parts;
+  size_t size_gparts = s->size_gparts;
+  size_t size_sparts = s->size_sparts;
+
+  int local_cells = 0;
+  for (int i = 0; i < s->nr_cells; ++i)
+    if (s->cells_top[i].nodeID == local_nodeID) local_cells++;
+
+  /* Number of extra particles we want for each type */
+  const size_t expected_num_extra_parts = local_cells * space_extra_parts;
+  const size_t expected_num_extra_gparts = local_cells * space_extra_gparts;
+  const size_t expected_num_extra_sparts = local_cells * space_extra_sparts;
+
+  if (verbose) {
+    message("Currently have %zd/%zd/%zd real particles.", nr_actual_parts,
+            nr_actual_gparts, nr_actual_sparts);
+    message("Currently have %zd/%zd/%zd spaces for extra particles.",
+            s->nr_extra_parts, s->nr_extra_gparts, s->nr_extra_sparts);
+    message("Requesting space for future %zd/%zd/%zd part/gpart/sparts.",
+            expected_num_extra_parts, expected_num_extra_gparts,
+            expected_num_extra_sparts);
+  }
+
+  if (expected_num_extra_parts < s->nr_extra_parts)
+    error("Reduction in top-level cells number not handled.");
+  if (expected_num_extra_gparts < s->nr_extra_gparts)
+    error("Reduction in top-level cells number not handled.");
+  if (expected_num_extra_sparts < s->nr_extra_sparts)
+    error("Reduction in top-level cells number not handled.");
+
+  /* Do we have enough space for the extra gparts (i.e. we haven't used up any)
+   * ? */
+  if (nr_gparts + expected_num_extra_gparts > size_gparts) {
+
+    /* Ok... need to put some more in the game */
+
+    /* Do we need to reallocate? */
+    if (nr_actual_gparts + expected_num_extra_gparts > size_gparts) {
+
+      size_gparts = (nr_actual_gparts + expected_num_extra_gparts) *
+                    engine_redistribute_alloc_margin;
+
+      if (verbose)
+        message("Re-allocating gparts array from %zd to %zd", s->size_gparts,
+                size_gparts);
+
+      /* Create more space for parts */
+      struct gpart *gparts_new = NULL;
+      if (posix_memalign((void **)&gparts_new, gpart_align,
+                         sizeof(struct gpart) * size_gparts) != 0)
+        error("Failed to allocate new gpart data");
+      const ptrdiff_t delta = gparts_new - s->gparts;
+      memcpy(gparts_new, s->gparts, sizeof(struct gpart) * s->size_gparts);
+      free(s->gparts);
+      s->gparts = gparts_new;
+
+      /* Update the counter */
+      s->size_gparts = size_gparts;
+
+      /* We now need to reset all the part and spart pointers */
+      for (size_t i = 0; i < nr_parts; ++i) {
+        if (s->parts[i].time_bin != time_bin_not_created)
+          s->parts[i].gpart += delta;
+      }
+      for (size_t i = 0; i < nr_sparts; ++i) {
+        if (s->sparts[i].time_bin != time_bin_not_created)
+          s->sparts[i].gpart += delta;
+      }
+    }
+
+    /* Turn some of the allocated spares into particles we can use */
+    for (size_t i = nr_gparts; i < nr_actual_gparts + expected_num_extra_gparts;
+         ++i) {
+      bzero(&s->gparts[i], sizeof(struct gpart));
+      s->gparts[i].time_bin = time_bin_not_created;
+      s->gparts[i].type = swift_type_dark_matter;
+      s->gparts[i].id_or_neg_offset = -1;
+    }
+
+      /* Put the spare particles in their correct cell */
+#ifdef WITH_MPI
+    error("Need to do this correctly over MPI for only the local cells.");
+#endif
+    int count_in_cell = 0, current_cell = 0;
+    size_t count_extra_gparts = 0;
+    for (size_t i = 0; i < nr_actual_gparts + expected_num_extra_gparts; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (current_cell == s->nr_cells)
+        error("Cell counter beyond the maximal nr. cells.");
+#endif
+
+      if (s->gparts[i].time_bin == time_bin_not_created) {
+
+        /* We want the extra particles to be at the centre of their cell */
+        s->gparts[i].x[0] = cells[current_cell].loc[0] + half_cell_width[0];
+        s->gparts[i].x[1] = cells[current_cell].loc[1] + half_cell_width[1];
+        s->gparts[i].x[2] = cells[current_cell].loc[2] + half_cell_width[2];
+        ++count_in_cell;
+        count_extra_gparts++;
+      }
+
+      /* Once we have reached the number of extra gpart per cell, we move to the
+       * next */
+      if (count_in_cell == space_extra_gparts) {
+        ++current_cell;
+        count_in_cell = 0;
+      }
+    }
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (count_extra_gparts != expected_num_extra_gparts)
+      error("Constructed the wrong number of extra gparts (%zd vs. %zd)",
+            count_extra_gparts, expected_num_extra_gparts);
+#endif
+
+    /* Update the counters */
+    s->nr_gparts = nr_actual_gparts + expected_num_extra_gparts;
+    s->nr_extra_gparts = expected_num_extra_gparts;
+  }
+
+  /* Do we have enough space for the extra parts (i.e. we haven't used up any) ?
+   */
+  if (expected_num_extra_parts > s->nr_extra_parts) {
+
+    /* Ok... need to put some more in the game */
+
+    /* Do we need to reallocate? */
+    if (nr_actual_parts + expected_num_extra_parts > size_parts) {
+
+      size_parts = (nr_actual_parts + expected_num_extra_parts) *
+                   engine_redistribute_alloc_margin;
+
+      if (verbose)
+        message("Re-allocating parts array from %zd to %zd", s->size_parts,
+                size_parts);
+
+      /* Create more space for parts */
+      struct part *parts_new = NULL;
+      if (posix_memalign((void **)&parts_new, part_align,
+                         sizeof(struct part) * size_parts) != 0)
+        error("Failed to allocate new part data");
+      memcpy(parts_new, s->parts, sizeof(struct part) * s->size_parts);
+      free(s->parts);
+      s->parts = parts_new;
+
+      /* Same for xparts */
+      struct xpart *xparts_new = NULL;
+      if (posix_memalign((void **)&xparts_new, xpart_align,
+                         sizeof(struct xpart) * size_parts) != 0)
+        error("Failed to allocate new xpart data");
+      memcpy(xparts_new, s->xparts, sizeof(struct xpart) * s->size_parts);
+      free(s->xparts);
+      s->xparts = xparts_new;
+
+      /* Update the counter */
+      s->size_parts = size_parts;
+    }
+
+    /* Turn some of the allocated spares into particles we can use */
+    for (size_t i = nr_parts; i < nr_actual_parts + expected_num_extra_parts;
+         ++i) {
+      bzero(&s->parts[i], sizeof(struct part));
+      bzero(&s->xparts[i], sizeof(struct xpart));
+      s->parts[i].time_bin = time_bin_not_created;
+      s->parts[i].id = -1;
+    }
+
+      /* Put the spare particles in their correct cell */
+#ifdef WITH_MPI
+    error("Need to do this correctly over MPI for only the local cells.");
+#endif
+    int count_in_cell = 0, current_cell = 0;
+    size_t count_extra_parts = 0;
+    for (size_t i = 0; i < nr_actual_parts + expected_num_extra_parts; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (current_cell == s->nr_cells)
+        error("Cell counter beyond the maximal nr. cells.");
+#endif
+
+      if (s->parts[i].time_bin == time_bin_not_created) {
+
+        /* We want the extra particles to be at the centre of their cell */
+        s->parts[i].x[0] = cells[current_cell].loc[0] + half_cell_width[0];
+        s->parts[i].x[1] = cells[current_cell].loc[1] + half_cell_width[1];
+        s->parts[i].x[2] = cells[current_cell].loc[2] + half_cell_width[2];
+        ++count_in_cell;
+        count_extra_parts++;
+      }
+
+      /* Once we have reached the number of extra part per cell, we move to the
+       * next */
+      if (count_in_cell == space_extra_parts) {
+        ++current_cell;
+        count_in_cell = 0;
+      }
+    }
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (count_extra_parts != expected_num_extra_parts)
+      error("Constructed the wrong number of extra parts (%zd vs. %zd)",
+            count_extra_parts, expected_num_extra_parts);
+#endif
+
+    /* Update the counters */
+    s->nr_parts = nr_actual_parts + expected_num_extra_parts;
+    s->nr_extra_parts = expected_num_extra_parts;
+  }
+
+  /* Do we have enough space for the extra sparts (i.e. we haven't used up any)
+   * ? */
+  if (nr_actual_sparts + expected_num_extra_sparts > nr_sparts) {
+
+    /* Ok... need to put some more in the game */
+
+    /* Do we need to reallocate? */
+    if (nr_actual_sparts + expected_num_extra_sparts > size_sparts) {
+
+      size_sparts = (nr_actual_sparts + expected_num_extra_sparts) *
+                    engine_redistribute_alloc_margin;
+
+      if (verbose)
+        message("Re-allocating sparts array from %zd to %zd", s->size_sparts,
+                size_sparts);
+
+      /* Create more space for parts */
+      struct spart *sparts_new = NULL;
+      if (posix_memalign((void **)&sparts_new, spart_align,
+                         sizeof(struct spart) * size_sparts) != 0)
+        error("Failed to allocate new spart data");
+      memcpy(sparts_new, s->sparts, sizeof(struct spart) * s->size_sparts);
+      free(s->sparts);
+      s->sparts = sparts_new;
+
+      /* Update the counter */
+      s->size_sparts = size_sparts;
+    }
+
+    /* Turn some of the allocated spares into particles we can use */
+    for (size_t i = nr_sparts; i < nr_actual_sparts + expected_num_extra_sparts;
+         ++i) {
+      bzero(&s->sparts[i], sizeof(struct spart));
+      s->sparts[i].time_bin = time_bin_not_created;
+      s->sparts[i].id = -42;
+    }
+
+      /* Put the spare particles in their correct cell */
+#ifdef WITH_MPI
+    error("Need to do this correctly over MPI for only the local cells.");
+#endif
+    int count_in_cell = 0, current_cell = 0;
+    size_t count_extra_sparts = 0;
+    for (size_t i = 0; i < nr_actual_sparts + expected_num_extra_sparts; ++i) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+      if (current_cell == s->nr_cells)
+        error("Cell counter beyond the maximal nr. cells.");
+#endif
+
+      if (s->sparts[i].time_bin == time_bin_not_created) {
+
+        /* We want the extra particles to be at the centre of their cell */
+        s->sparts[i].x[0] = cells[current_cell].loc[0] + half_cell_width[0];
+        s->sparts[i].x[1] = cells[current_cell].loc[1] + half_cell_width[1];
+        s->sparts[i].x[2] = cells[current_cell].loc[2] + half_cell_width[2];
+        ++count_in_cell;
+        count_extra_sparts++;
+      }
+
+      /* Once we have reached the number of extra spart per cell, we move to the
+       * next */
+      if (count_in_cell == space_extra_sparts) {
+        ++current_cell;
+        count_in_cell = 0;
+      }
+    }
+
+#ifdef SWIFT_DEBUG_CHECKS
+    if (count_extra_sparts != expected_num_extra_sparts)
+      error("Constructed the wrong number of extra sparts (%zd vs. %zd)",
+            count_extra_sparts, expected_num_extra_sparts);
+#endif
+
+    /* Update the counters */
+    s->nr_sparts = nr_actual_sparts + expected_num_extra_sparts;
+    s->nr_extra_sparts = expected_num_extra_sparts;
+  }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Verify that the links are correct */
+  if ((nr_gparts > 0 && nr_parts > 0) || (nr_gparts > 0 && nr_sparts > 0))
+    part_verify_links(s->parts, s->gparts, s->sparts, nr_parts, nr_gparts,
+                      nr_sparts, verbose);
+#endif
+}
+
 /**
  * @brief Re-build the cells as well as the tasks.
  *
  * @param s The #space in which to update the cells.
+ * @param repartitioned Did we just repartition?
  * @param verbose Print messages to stdout or not
- *
  */
-void space_rebuild(struct space *s, int verbose) {
+void space_rebuild(struct space *s, int repartitioned, int verbose) {
 
   const ticks tic = getticks();
 
@@ -526,173 +997,283 @@ void space_rebuild(struct space *s, int verbose) {
   /* Re-grid if necessary, or just re-set the cell data. */
   space_regrid(s, verbose);
 
+  /* Allocate extra space for particles that will be created */
+  if (s->with_star_formation) space_allocate_extras(s, verbose);
+
+  struct cell *cells_top = s->cells_top;
+  const integertime_t ti_current = (s->e != NULL) ? s->e->ti_current : 0;
+  const int local_nodeID = s->e->nodeID;
+
+  /* The current number of particles */
   size_t nr_parts = s->nr_parts;
   size_t nr_gparts = s->nr_gparts;
   size_t nr_sparts = s->nr_sparts;
-  struct cell *restrict cells_top = s->cells_top;
-  const integertime_t ti_current = (s->e != NULL) ? s->e->ti_current : 0;
 
-  /* Run through the particles and get their cell index. Allocates
-     an index that is larger than the number of particles to avoid
-     re-allocating after shuffling. */
-  const size_t ind_size = s->size_parts + 100;
-  int *ind = (int *)malloc(sizeof(int) * ind_size);
-  if (ind == NULL) error("Failed to allocate temporary particle indices.");
-  int *cell_part_counts = (int *)calloc(sizeof(int), s->nr_cells);
-  if (cell_part_counts == NULL)
-    error("Failed to allocate cell part count buffer.");
-  if (s->size_parts > 0)
-    space_parts_get_cell_index(s, ind, cell_part_counts, cells_top, verbose);
-
-  /* Run through the gravity particles and get their cell index. */
-  const size_t gind_size = s->size_gparts + 100;
-  int *gind = (int *)malloc(sizeof(int) * gind_size);
-  if (gind == NULL) error("Failed to allocate temporary g-particle indices.");
-  int *cell_gpart_counts = (int *)calloc(sizeof(int), s->nr_cells);
-  if (cell_gpart_counts == NULL)
-    error("Failed to allocate cell gpart count buffer.");
-  if (s->size_gparts > 0)
-    space_gparts_get_cell_index(s, gind, cell_gpart_counts, cells_top, verbose);
-
-  /* Run through the star particles and get their cell index. */
-  const size_t sind_size = s->size_sparts + 100;
-  int *sind = (int *)malloc(sizeof(int) * sind_size);
-  if (sind == NULL) error("Failed to allocate temporary s-particle indices.");
-  int *cell_spart_counts = (int *)calloc(sizeof(int), s->nr_cells);
-  if (cell_spart_counts == NULL)
-    error("Failed to allocate cell gpart count buffer.");
-  if (s->size_sparts > 0)
-    space_sparts_get_cell_index(s, sind, cell_spart_counts, cells_top, verbose);
+  /* The number of particles we allocated memory for */
+  size_t size_parts = s->size_parts;
+  size_t size_gparts = s->size_gparts;
+  size_t size_sparts = s->size_sparts;
+
+  /* Counter for the number of inhibited particles found on the node */
+  size_t count_inhibited_parts = 0;
+  size_t count_inhibited_gparts = 0;
+  size_t count_inhibited_sparts = 0;
+
+  /* Counter for the number of extra particles found on the node */
+  size_t count_extra_parts = 0;
+  size_t count_extra_gparts = 0;
+  size_t count_extra_sparts = 0;
+
+  /* Number of particles we expect to have after strays exchange */
+  const size_t h_index_size = size_parts + space_expected_max_nr_strays;
+  const size_t g_index_size = size_gparts + space_expected_max_nr_strays;
+  const size_t s_index_size = size_sparts + space_expected_max_nr_strays;
+
+  /* Allocate arrays to store the indices of the cells where particles
+     belong. We allocate extra space to allow for particles we may
+     receive from other nodes */
+  int *h_index = (int *)malloc(sizeof(int) * h_index_size);
+  int *g_index = (int *)malloc(sizeof(int) * g_index_size);
+  int *s_index = (int *)malloc(sizeof(int) * s_index_size);
+  if (h_index == NULL || g_index == NULL || s_index == NULL)
+    error("Failed to allocate temporary particle indices.");
+
+  /* Allocate counters of particles that will land in each cell */
+  int *cell_part_counts = (int *)malloc(sizeof(int) * s->nr_cells);
+  int *cell_gpart_counts = (int *)malloc(sizeof(int) * s->nr_cells);
+  int *cell_spart_counts = (int *)malloc(sizeof(int) * s->nr_cells);
+  if (cell_part_counts == NULL || cell_gpart_counts == NULL ||
+      cell_spart_counts == NULL)
+    error("Failed to allocate cell particle count buffer.");
+
+  /* Initialise the counters, including buffer space for future particles */
+  for (int i = 0; i < s->nr_cells; ++i) {
+    cell_part_counts[i] = 0;
+    cell_gpart_counts[i] = 0;
+    cell_spart_counts[i] = 0;
+  }
 
-#ifdef WITH_MPI
-  const int local_nodeID = s->e->nodeID;
+  /* Run through the particles and get their cell index. */
+  if (nr_parts > 0)
+    space_parts_get_cell_index(s, h_index, cell_part_counts,
+                               &count_inhibited_parts, &count_extra_parts,
+                               verbose);
+  if (nr_gparts > 0)
+    space_gparts_get_cell_index(s, g_index, cell_gpart_counts,
+                                &count_inhibited_gparts, &count_extra_gparts,
+                                verbose);
+  if (nr_sparts > 0)
+    space_sparts_get_cell_index(s, s_index, cell_spart_counts,
+                                &count_inhibited_sparts, &count_extra_sparts,
+                                verbose);
 
-  /* Move non-local parts to the end of the list. */
-  for (size_t k = 0; k < nr_parts;) {
-    if (cells_top[ind[k]].nodeID != local_nodeID) {
-      nr_parts -= 1;
-      /* Swap the particle */
-      memswap(&s->parts[k], &s->parts[nr_parts], sizeof(struct part));
-      /* Swap the link with the gpart */
-      if (s->parts[k].gpart != NULL) {
-        s->parts[k].gpart->id_or_neg_offset = -k;
-      }
-      if (s->parts[nr_parts].gpart != NULL) {
-        s->parts[nr_parts].gpart->id_or_neg_offset = -nr_parts;
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Some safety checks */
+  if (repartitioned && count_inhibited_parts)
+    error("We just repartitioned but still found inhibited parts.");
+  if (repartitioned && count_inhibited_sparts)
+    error("We just repartitioned but still found inhibited sparts.");
+  if (repartitioned && count_inhibited_gparts)
+    error("We just repartitioned but still found inhibited gparts.");
+
+  if (count_extra_parts != s->nr_extra_parts)
+    error(
+        "Number of extra parts in the part array not matching the space "
+        "counter.");
+  if (count_extra_gparts != s->nr_extra_gparts)
+    error(
+        "Number of extra gparts in the gpart array not matching the space "
+        "counter.");
+  if (count_extra_sparts != s->nr_extra_sparts)
+    error(
+        "Number of extra sparts in the spart array not matching the space "
+        "counter.");
+#endif
+
+  /* Move non-local parts and inhibited parts to the end of the list. */
+  if (!repartitioned && (s->e->nr_nodes > 1 || count_inhibited_parts > 0)) {
+    for (size_t k = 0; k < nr_parts; /* void */) {
+
+      /* Inhibited particle or foreign particle */
+      if (h_index[k] == -1 || cells_top[h_index[k]].nodeID != local_nodeID) {
+
+        /* One fewer particle */
+        nr_parts -= 1;
+
+        /* Swap the particle */
+        memswap(&s->parts[k], &s->parts[nr_parts], sizeof(struct part));
+
+        /* Swap the link with the gpart */
+        if (s->parts[k].gpart != NULL) {
+          s->parts[k].gpart->id_or_neg_offset = -k;
+        }
+        if (s->parts[nr_parts].gpart != NULL) {
+          s->parts[nr_parts].gpart->id_or_neg_offset = -nr_parts;
+        }
+
+        /* Swap the xpart */
+        memswap(&s->xparts[k], &s->xparts[nr_parts], sizeof(struct xpart));
+        /* Swap the index */
+        memswap(&h_index[k], &h_index[nr_parts], sizeof(int));
+
+      } else {
+        /* Increment when not exchanging otherwise we need to retest "k".*/
+        k++;
       }
-      /* Swap the xpart */
-      memswap(&s->xparts[k], &s->xparts[nr_parts], sizeof(struct xpart));
-      /* Swap the index */
-      memswap(&ind[k], &ind[nr_parts], sizeof(int));
-    } else {
-      /* Increment when not exchanging otherwise we need to retest "k".*/
-      k++;
     }
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that all parts are in the correct places. */
+  size_t check_count_inhibited_part = 0;
   for (size_t k = 0; k < nr_parts; k++) {
-    if (cells_top[ind[k]].nodeID != local_nodeID) {
+    if (h_index[k] == -1 || cells_top[h_index[k]].nodeID != local_nodeID) {
       error("Failed to move all non-local parts to send list");
     }
   }
   for (size_t k = nr_parts; k < s->nr_parts; k++) {
-    if (cells_top[ind[k]].nodeID == local_nodeID) {
+    if (h_index[k] != -1 && cells_top[h_index[k]].nodeID == local_nodeID) {
       error("Failed to remove local parts from send list");
     }
+    if (h_index[k] == -1) ++check_count_inhibited_part;
   }
-#endif
+  if (check_count_inhibited_part != count_inhibited_parts)
+    error("Counts of inhibited particles do not match!");
+#endif /* SWIFT_DEBUG_CHECKS */
 
-  /* Move non-local sparts to the end of the list. */
-  for (size_t k = 0; k < nr_sparts;) {
-    if (cells_top[sind[k]].nodeID != local_nodeID) {
-      nr_sparts -= 1;
-      /* Swap the particle */
-      memswap(&s->sparts[k], &s->sparts[nr_sparts], sizeof(struct spart));
-      /* Swap the link with the gpart */
-      if (s->sparts[k].gpart != NULL) {
-        s->sparts[k].gpart->id_or_neg_offset = -k;
-      }
-      if (s->sparts[nr_sparts].gpart != NULL) {
-        s->sparts[nr_sparts].gpart->id_or_neg_offset = -nr_sparts;
+  /* Move non-local sparts and inhibited sparts to the end of the list. */
+  if (!repartitioned && (s->e->nr_nodes > 1 || count_inhibited_sparts > 0)) {
+    for (size_t k = 0; k < nr_sparts; /* void */) {
+
+      /* Inhibited particle or foreign particle */
+      if (s_index[k] == -1 || cells_top[s_index[k]].nodeID != local_nodeID) {
+
+        /* One fewer particle */
+        nr_sparts -= 1;
+
+        /* Swap the particle */
+        memswap(&s->sparts[k], &s->sparts[nr_sparts], sizeof(struct spart));
+
+        /* Swap the link with the gpart */
+        if (s->sparts[k].gpart != NULL) {
+          s->sparts[k].gpart->id_or_neg_offset = -k;
+        }
+        if (s->sparts[nr_sparts].gpart != NULL) {
+          s->sparts[nr_sparts].gpart->id_or_neg_offset = -nr_sparts;
+        }
+
+        /* Swap the index */
+        memswap(&s_index[k], &s_index[nr_sparts], sizeof(int));
+
+      } else {
+        /* Increment when not exchanging otherwise we need to retest "k".*/
+        k++;
       }
-      /* Swap the index */
-      memswap(&sind[k], &sind[nr_sparts], sizeof(int));
-    } else {
-      /* Increment when not exchanging otherwise we need to retest "k".*/
-      k++;
     }
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
-  /* Check that all sparts are in the correct place (untested). */
+  /* Check that all sparts are in the correct place. */
+  size_t check_count_inhibited_spart = 0;
   for (size_t k = 0; k < nr_sparts; k++) {
-    if (cells_top[sind[k]].nodeID != local_nodeID) {
+    if (s_index[k] == -1 || cells_top[s_index[k]].nodeID != local_nodeID) {
       error("Failed to move all non-local sparts to send list");
     }
   }
   for (size_t k = nr_sparts; k < s->nr_sparts; k++) {
-    if (cells_top[sind[k]].nodeID == local_nodeID) {
+    if (s_index[k] != -1 && cells_top[s_index[k]].nodeID == local_nodeID) {
       error("Failed to remove local sparts from send list");
     }
+    if (s_index[k] == -1) ++check_count_inhibited_spart;
   }
-#endif
+  if (check_count_inhibited_spart != count_inhibited_sparts)
+    error("Counts of inhibited s-particles do not match!");
+#endif /* SWIFT_DEBUG_CHECKS */
 
-  /* Move non-local gparts to the end of the list. */
-  for (size_t k = 0; k < nr_gparts;) {
-    if (cells_top[gind[k]].nodeID != local_nodeID) {
-      nr_gparts -= 1;
-      /* Swap the particle */
-      memswap(&s->gparts[k], &s->gparts[nr_gparts], sizeof(struct gpart));
-      /* Swap the link with part/spart */
-      if (s->gparts[k].type == swift_type_gas) {
-        s->parts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
-      } else if (s->gparts[k].type == swift_type_star) {
-        s->sparts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
-      }
-      if (s->gparts[nr_gparts].type == swift_type_gas) {
-        s->parts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
-            &s->gparts[nr_gparts];
-      } else if (s->gparts[nr_gparts].type == swift_type_star) {
-        s->sparts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
-            &s->gparts[nr_gparts];
+  /* Move non-local gparts and inhibited parts to the end of the list. */
+  if (!repartitioned && (s->e->nr_nodes > 1 || count_inhibited_gparts > 0)) {
+    for (size_t k = 0; k < nr_gparts; /* void */) {
+
+      /* Inhibited particle or foreign particle */
+      if (g_index[k] == -1 || cells_top[g_index[k]].nodeID != local_nodeID) {
+
+        /* One fewer particle */
+        nr_gparts -= 1;
+
+        /* Swap the particle */
+        memswap(&s->gparts[k], &s->gparts[nr_gparts], sizeof(struct gpart));
+
+        /* Swap the link with part/spart */
+        if (s->gparts[k].type == swift_type_gas) {
+          s->parts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
+        } else if (s->gparts[k].type == swift_type_stars) {
+          s->sparts[-s->gparts[k].id_or_neg_offset].gpart = &s->gparts[k];
+        }
+        if (s->gparts[nr_gparts].type == swift_type_gas) {
+          s->parts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
+              &s->gparts[nr_gparts];
+        } else if (s->gparts[nr_gparts].type == swift_type_stars) {
+          s->sparts[-s->gparts[nr_gparts].id_or_neg_offset].gpart =
+              &s->gparts[nr_gparts];
+        }
+
+        /* Swap the index */
+        memswap(&g_index[k], &g_index[nr_gparts], sizeof(int));
+      } else {
+        /* Increment when not exchanging otherwise we need to retest "k".*/
+        k++;
       }
-      /* Swap the index */
-      memswap(&gind[k], &gind[nr_gparts], sizeof(int));
-    } else {
-      /* Increment when not exchanging otherwise we need to retest "k".*/
-      k++;
     }
   }
 
 #ifdef SWIFT_DEBUG_CHECKS
-  /* Check that all gparts are in the correct place (untested). */
+  /* Check that all gparts are in the correct place. */
+  size_t check_count_inhibited_gpart = 0;
   for (size_t k = 0; k < nr_gparts; k++) {
-    if (cells_top[gind[k]].nodeID != local_nodeID) {
+    if (g_index[k] == -1 || cells_top[g_index[k]].nodeID != local_nodeID) {
       error("Failed to move all non-local gparts to send list");
     }
   }
   for (size_t k = nr_gparts; k < s->nr_gparts; k++) {
-    if (cells_top[gind[k]].nodeID == local_nodeID) {
+    if (g_index[k] != -1 && cells_top[g_index[k]].nodeID == local_nodeID) {
       error("Failed to remove local gparts from send list");
     }
+    if (g_index[k] == -1) ++check_count_inhibited_gpart;
   }
-#endif
+  if (check_count_inhibited_gpart != count_inhibited_gparts)
+    error("Counts of inhibited g-particles do not match!");
+#endif /* SWIFT_DEBUG_CHECKS */
+
+#ifdef WITH_MPI
 
   /* Exchange the strays, note that this potentially re-allocates
-     the parts arrays. */
-  size_t nr_parts_exchanged = s->nr_parts - nr_parts;
-  size_t nr_gparts_exchanged = s->nr_gparts - nr_gparts;
-  size_t nr_sparts_exchanged = s->nr_sparts - nr_sparts;
-  engine_exchange_strays(s->e, nr_parts, &ind[nr_parts], &nr_parts_exchanged,
-                         nr_gparts, &gind[nr_gparts], &nr_gparts_exchanged,
-                         nr_sparts, &sind[nr_sparts], &nr_sparts_exchanged);
-
-  /* Set the new particle counts. */
-  s->nr_parts = nr_parts + nr_parts_exchanged;
-  s->nr_gparts = nr_gparts + nr_gparts_exchanged;
-  s->nr_sparts = nr_sparts + nr_sparts_exchanged;
+     the parts arrays. This can be skipped if we just repartitioned space
+     as there should be no strays in that case */
+  if (!repartitioned) {
+
+    size_t nr_parts_exchanged = s->nr_parts - nr_parts;
+    size_t nr_gparts_exchanged = s->nr_gparts - nr_gparts;
+    size_t nr_sparts_exchanged = s->nr_sparts - nr_sparts;
+    engine_exchange_strays(s->e, nr_parts, &h_index[nr_parts],
+                           &nr_parts_exchanged, nr_gparts, &g_index[nr_gparts],
+                           &nr_gparts_exchanged, nr_sparts, &s_index[nr_sparts],
+                           &nr_sparts_exchanged);
+
+    /* Set the new particle counts. */
+    s->nr_parts = nr_parts + nr_parts_exchanged;
+    s->nr_gparts = nr_gparts + nr_gparts_exchanged;
+    s->nr_sparts = nr_sparts + nr_sparts_exchanged;
+
+  } else {
+#ifdef SWIFT_DEBUG_CHECKS
+    if (s->nr_parts != nr_parts)
+      error("Number of parts changing after repartition");
+    if (s->nr_sparts != nr_sparts)
+      error("Number of sparts changing after repartition");
+    if (s->nr_gparts != nr_gparts)
+      error("Number of gparts changing after repartition");
+#endif
+  }
 
   /* Clear non-local cell counts. */
   for (int k = 0; k < s->nr_cells; k++) {
@@ -704,23 +1285,23 @@ void space_rebuild(struct space *s, int verbose) {
   }
 
   /* Re-allocate the index array for the parts if needed.. */
-  if (s->nr_parts + 1 > ind_size) {
+  if (s->nr_parts + 1 > h_index_size) {
     int *ind_new;
     if ((ind_new = (int *)malloc(sizeof(int) * (s->nr_parts + 1))) == NULL)
       error("Failed to allocate temporary particle indices.");
-    memcpy(ind_new, ind, sizeof(int) * nr_parts);
-    free(ind);
-    ind = ind_new;
+    memcpy(ind_new, h_index, sizeof(int) * nr_parts);
+    free(h_index);
+    h_index = ind_new;
   }
 
   /* Re-allocate the index array for the sparts if needed.. */
-  if (s->nr_sparts + 1 > sind_size) {
+  if (s->nr_sparts + 1 > s_index_size) {
     int *sind_new;
     if ((sind_new = (int *)malloc(sizeof(int) * (s->nr_sparts + 1))) == NULL)
       error("Failed to allocate temporary s-particle indices.");
-    memcpy(sind_new, sind, sizeof(int) * nr_sparts);
-    free(sind);
-    sind = sind_new;
+    memcpy(sind_new, s_index, sizeof(int) * nr_sparts);
+    free(s_index);
+    s_index = sind_new;
   }
 
   const int cdim[3] = {s->cdim[0], s->cdim[1], s->cdim[2]};
@@ -729,13 +1310,13 @@ void space_rebuild(struct space *s, int verbose) {
   /* Assign each received part to its cell. */
   for (size_t k = nr_parts; k < s->nr_parts; k++) {
     const struct part *const p = &s->parts[k];
-    ind[k] =
+    h_index[k] =
         cell_getid(cdim, p->x[0] * ih[0], p->x[1] * ih[1], p->x[2] * ih[2]);
-    cell_part_counts[ind[k]]++;
+    cell_part_counts[h_index[k]]++;
 #ifdef SWIFT_DEBUG_CHECKS
-    if (cells_top[ind[k]].nodeID != local_nodeID)
+    if (cells_top[h_index[k]].nodeID != local_nodeID)
       error("Received part that does not belong to me (nodeID=%i).",
-            cells_top[ind[k]].nodeID);
+            cells_top[h_index[k]].nodeID);
 #endif
   }
   nr_parts = s->nr_parts;
@@ -743,29 +1324,38 @@ void space_rebuild(struct space *s, int verbose) {
   /* Assign each received spart to its cell. */
   for (size_t k = nr_sparts; k < s->nr_sparts; k++) {
     const struct spart *const sp = &s->sparts[k];
-    sind[k] =
+    s_index[k] =
         cell_getid(cdim, sp->x[0] * ih[0], sp->x[1] * ih[1], sp->x[2] * ih[2]);
-    cell_spart_counts[sind[k]]++;
+    cell_spart_counts[s_index[k]]++;
 #ifdef SWIFT_DEBUG_CHECKS
-    if (cells_top[sind[k]].nodeID != local_nodeID)
+    if (cells_top[s_index[k]].nodeID != local_nodeID)
       error("Received s-part that does not belong to me (nodeID=%i).",
-            cells_top[sind[k]].nodeID);
+            cells_top[s_index[k]].nodeID);
 #endif
   }
   nr_sparts = s->nr_sparts;
 
+#else /* WITH_MPI */
+
+  /* Update the part and spart counters */
+  s->nr_parts = nr_parts;
+  s->nr_sparts = nr_sparts;
+
 #endif /* WITH_MPI */
 
   /* Sort the parts according to their cells. */
   if (nr_parts > 0)
-    space_parts_sort(s->parts, s->xparts, ind, cell_part_counts, s->nr_cells,
-                     0);
+    space_parts_sort(s->parts, s->xparts, h_index, cell_part_counts,
+                     s->nr_cells, 0);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the part have been sorted correctly. */
   for (size_t k = 0; k < nr_parts; k++) {
     const struct part *p = &s->parts[k];
 
+    if (p->time_bin == time_bin_inhibited)
+      error("Inhibited particle sorted into a cell!");
+
     /* New cell index */
     const int new_ind =
         cell_getid(s->cdim, p->x[0] * s->iwidth[0], p->x[1] * s->iwidth[1],
@@ -774,7 +1364,7 @@ void space_rebuild(struct space *s, int verbose) {
     /* New cell of this part */
     const struct cell *c = &s->cells_top[new_ind];
 
-    if (ind[k] != new_ind)
+    if (h_index[k] != new_ind)
       error("part's new cell index not matching sorted index.");
 
     if (p->x[0] < c->loc[0] || p->x[0] > c->loc[0] + c->width[0] ||
@@ -782,17 +1372,20 @@ void space_rebuild(struct space *s, int verbose) {
         p->x[2] < c->loc[2] || p->x[2] > c->loc[2] + c->width[2])
       error("part not sorted into the right top-level cell!");
   }
-#endif
+#endif /* SWIFT_DEBUG_CHECKS */
 
   /* Sort the sparts according to their cells. */
   if (nr_sparts > 0)
-    space_sparts_sort(s->sparts, sind, cell_spart_counts, s->nr_cells, 0);
+    space_sparts_sort(s->sparts, s_index, cell_spart_counts, s->nr_cells, 0);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the spart have been sorted correctly. */
   for (size_t k = 0; k < nr_sparts; k++) {
     const struct spart *sp = &s->sparts[k];
 
+    if (sp->time_bin == time_bin_inhibited)
+      error("Inhibited particle sorted into a cell!");
+
     /* New cell index */
     const int new_sind =
         cell_getid(s->cdim, sp->x[0] * s->iwidth[0], sp->x[1] * s->iwidth[1],
@@ -801,7 +1394,7 @@ void space_rebuild(struct space *s, int verbose) {
     /* New cell of this spart */
     const struct cell *c = &s->cells_top[new_sind];
 
-    if (sind[k] != new_sind)
+    if (s_index[k] != new_sind)
       error("spart's new cell index not matching sorted index.");
 
     if (sp->x[0] < c->loc[0] || sp->x[0] > c->loc[0] + c->width[0] ||
@@ -809,72 +1402,89 @@ void space_rebuild(struct space *s, int verbose) {
         sp->x[2] < c->loc[2] || sp->x[2] > c->loc[2] + c->width[2])
       error("spart not sorted into the right top-level cell!");
   }
-#endif
+#endif /* SWIFT_DEBUG_CHECKS */
 
-  /* Extract the cell counts from the sorted indices. */
+  /* Extract the cell counts from the sorted indices. Deduct the extra
+   * particles. */
   size_t last_index = 0;
-  ind[nr_parts] = s->nr_cells;  // sentinel.
+  h_index[nr_parts] = s->nr_cells;  // sentinel.
   for (size_t k = 0; k < nr_parts; k++) {
-    if (ind[k] < ind[k + 1]) {
-      cells_top[ind[k]].count = k - last_index + 1;
+    if (h_index[k] < h_index[k + 1]) {
+      cells_top[h_index[k]].hydro.count =
+          k - last_index + 1 - space_extra_parts;
       last_index = k + 1;
     }
   }
 
-  /* Extract the cell counts from the sorted indices. */
+  /* Extract the cell counts from the sorted indices. Deduct the extra
+   * particles. */
   size_t last_sindex = 0;
-  sind[nr_sparts] = s->nr_cells;  // sentinel.
+  s_index[nr_sparts] = s->nr_cells;  // sentinel.
   for (size_t k = 0; k < nr_sparts; k++) {
-    if (sind[k] < sind[k + 1]) {
-      cells_top[sind[k]].scount = k - last_sindex + 1;
+    if (s_index[k] < s_index[k + 1]) {
+      cells_top[s_index[k]].stars.count =
+          k - last_sindex + 1 - space_extra_sparts;
       last_sindex = k + 1;
     }
   }
 
   /* We no longer need the indices as of here. */
-  free(ind);
+  free(h_index);
   free(cell_part_counts);
-  free(sind);
+  free(s_index);
   free(cell_spart_counts);
 
 #ifdef WITH_MPI
 
   /* Re-allocate the index array for the gparts if needed.. */
-  if (s->nr_gparts + 1 > gind_size) {
+  if (s->nr_gparts + 1 > g_index_size) {
     int *gind_new;
     if ((gind_new = (int *)malloc(sizeof(int) * (s->nr_gparts + 1))) == NULL)
       error("Failed to allocate temporary g-particle indices.");
-    memcpy(gind_new, gind, sizeof(int) * nr_gparts);
-    free(gind);
-    gind = gind_new;
+    memcpy(gind_new, g_index, sizeof(int) * nr_gparts);
+    free(g_index);
+    g_index = gind_new;
   }
 
   /* Assign each received gpart to its cell. */
   for (size_t k = nr_gparts; k < s->nr_gparts; k++) {
     const struct gpart *const p = &s->gparts[k];
-    gind[k] =
+    g_index[k] =
         cell_getid(cdim, p->x[0] * ih[0], p->x[1] * ih[1], p->x[2] * ih[2]);
-    cell_gpart_counts[gind[k]]++;
+    cell_gpart_counts[g_index[k]]++;
 #ifdef SWIFT_DEBUG_CHECKS
-    if (cells_top[gind[k]].nodeID != s->e->nodeID)
+    if (cells_top[g_index[k]].nodeID != s->e->nodeID)
       error("Received g-part that does not belong to me (nodeID=%i).",
-            cells_top[gind[k]].nodeID);
+            cells_top[g_index[k]].nodeID);
 #endif
   }
   nr_gparts = s->nr_gparts;
 
+#else /* WITH_MPI */
+
+  /* Update the gpart counter */
+  s->nr_gparts = nr_gparts;
+
 #endif /* WITH_MPI */
 
+  /* Mark that there are no inhibited particles left */
+  s->nr_inhibited_parts = 0;
+  s->nr_inhibited_gparts = 0;
+  s->nr_inhibited_sparts = 0;
+
   /* Sort the gparts according to their cells. */
   if (nr_gparts > 0)
-    space_gparts_sort(s->gparts, s->parts, s->sparts, gind, cell_gpart_counts,
-                      s->nr_cells);
+    space_gparts_sort(s->gparts, s->parts, s->sparts, g_index,
+                      cell_gpart_counts, s->nr_cells);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that the gpart have been sorted correctly. */
   for (size_t k = 0; k < nr_gparts; k++) {
     const struct gpart *gp = &s->gparts[k];
 
+    if (gp->time_bin == time_bin_inhibited)
+      error("Inhibited particle sorted into a cell!");
+
     /* New cell index */
     const int new_gind =
         cell_getid(s->cdim, gp->x[0] * s->iwidth[0], gp->x[1] * s->iwidth[1],
@@ -883,7 +1493,7 @@ void space_rebuild(struct space *s, int verbose) {
     /* New cell of this gpart */
     const struct cell *c = &s->cells_top[new_gind];
 
-    if (gind[k] != new_gind)
+    if (g_index[k] != new_gind)
       error("gpart's new cell index not matching sorted index.");
 
     if (gp->x[0] < c->loc[0] || gp->x[0] > c->loc[0] + c->width[0] ||
@@ -891,20 +1501,22 @@ void space_rebuild(struct space *s, int verbose) {
         gp->x[2] < c->loc[2] || gp->x[2] > c->loc[2] + c->width[2])
       error("gpart not sorted into the right top-level cell!");
   }
-#endif
+#endif /* SWIFT_DEBUG_CHECKS */
 
-  /* Extract the cell counts from the sorted indices. */
+  /* Extract the cell counts from the sorted indices. Deduct the extra
+   * particles. */
   size_t last_gindex = 0;
-  gind[nr_gparts] = s->nr_cells;
+  g_index[nr_gparts] = s->nr_cells;
   for (size_t k = 0; k < nr_gparts; k++) {
-    if (gind[k] < gind[k + 1]) {
-      cells_top[gind[k]].gcount = k - last_gindex + 1;
+    if (g_index[k] < g_index[k + 1]) {
+      cells_top[g_index[k]].grav.count =
+          k - last_gindex + 1 - space_extra_gparts;
       last_gindex = k + 1;
     }
   }
 
   /* We no longer need the indices as of here. */
-  free(gind);
+  free(g_index);
   free(cell_gpart_counts);
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -914,40 +1526,80 @@ void space_rebuild(struct space *s, int verbose) {
                       nr_sparts, verbose);
 #endif
 
-  /* Hook the cells up to the parts. */
-  // tic = getticks();
+  /* Hook the cells up to the parts. Make list of local and non-empty cells */
+  ticks tic2 = getticks();
   struct part *finger = s->parts;
   struct xpart *xfinger = s->xparts;
   struct gpart *gfinger = s->gparts;
   struct spart *sfinger = s->sparts;
+  s->nr_cells_with_particles = 0;
+  s->nr_local_cells_with_particles = 0;
+  s->nr_local_cells = 0;
   for (int k = 0; k < s->nr_cells; k++) {
     struct cell *restrict c = &cells_top[k];
-    c->ti_old_part = ti_current;
-    c->ti_old_gpart = ti_current;
-    c->ti_old_multipole = ti_current;
-    if (c->nodeID == engine_rank) {
-      c->parts = finger;
-      c->xparts = xfinger;
-      c->gparts = gfinger;
-      c->sparts = sfinger;
-      finger = &finger[c->count];
-      xfinger = &xfinger[c->count];
-      gfinger = &gfinger[c->gcount];
-      sfinger = &sfinger[c->scount];
-    }
-  }
-  // message( "hooking up cells took %.3f %s." ,
-  // clocks_from_ticks(getticks() - tic), clocks_getunit());
-
-  /* At this point, we have the upper-level cells, old or new. Now make
-     sure that the parts in each cell are ok. */
-  space_split(s, cells_top, s->nr_cells, verbose);
+    c->hydro.ti_old_part = ti_current;
+    c->grav.ti_old_part = ti_current;
+    c->grav.ti_old_multipole = ti_current;
+    c->stars.ti_old_part = ti_current;
+
+#ifdef SWIFT_DEBUG_CHECKS
+    c->cellID = -last_cell_id;
+    last_cell_id++;
+#endif
+
+    const int is_local = (c->nodeID == engine_rank);
+    const int has_particles =
+        (c->hydro.count > 0) || (c->grav.count > 0) || (c->stars.count > 0);
+
+    if (is_local) {
+      c->hydro.parts = finger;
+      c->hydro.xparts = xfinger;
+      c->grav.parts = gfinger;
+      c->stars.parts = sfinger;
+
+      c->hydro.count_total = c->hydro.count + space_extra_parts;
+      c->grav.count_total = c->grav.count + space_extra_gparts;
+      c->stars.count_total = c->stars.count + space_extra_sparts;
+
+      finger = &finger[c->hydro.count_total];
+      xfinger = &xfinger[c->hydro.count_total];
+      gfinger = &gfinger[c->grav.count_total];
+      sfinger = &sfinger[c->stars.count_total];
+
+      /* Add this cell to the list of local cells */
+      s->local_cells_top[s->nr_local_cells] = k;
+      s->nr_local_cells++;
+    }
+
+    if (is_local && has_particles) {
+
+      /* Add this cell to the list of non-empty cells */
+      s->local_cells_with_particles_top[s->nr_local_cells_with_particles] = k;
+      s->nr_local_cells_with_particles++;
+    }
+  }
+  if (verbose) {
+    message("Have %d local top-level cells with particles (total=%d)",
+            s->nr_local_cells_with_particles, s->nr_cells);
+    message("Have %d local top-level cells (total=%d)", s->nr_local_cells,
+            s->nr_cells);
+    message("hooking up cells took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+  }
+
+  /* Re-order the extra particles such that they are at the end of their cell's
+     memory pool. */
+  if (s->with_star_formation) space_reorder_extras(s, verbose);
+
+  /* At this point, we have the upper-level cells. Now recursively split each
+     cell to get the full AMR grid. */
+  space_split(s, verbose);
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* Check that the multipole construction went OK */
-  if (s->gravity)
+  if (s->with_self_gravity)
     for (int k = 0; k < s->nr_cells; k++)
-      cell_check_multipole(&s->cells_top[k], NULL);
+      cell_check_multipole(&s->cells_top[k]);
 #endif
 
   /* Clean up any stray sort indices in the cell buffer. */
@@ -959,28 +1611,96 @@ void space_rebuild(struct space *s, int verbose) {
 }
 
 /**
- * @brief Split particles between cells of a hierarchy
+ * @brief Split particles between cells of a hierarchy.
  *
  * This is done in parallel using threads in the #threadpool.
+ * Only do this for the local non-empty top-level cells.
  *
  * @param s The #space.
- * @param cells The cell hierarchy.
- * @param nr_cells The number of cells.
  * @param verbose Are we talkative ?
  */
-void space_split(struct space *s, struct cell *cells, int nr_cells,
-                 int verbose) {
+void space_split(struct space *s, int verbose) {
 
   const ticks tic = getticks();
 
-  threadpool_map(&s->e->threadpool, space_split_mapper, cells, nr_cells,
-                 sizeof(struct cell), 0, s);
+  threadpool_map(&s->e->threadpool, space_split_mapper,
+                 s->local_cells_with_particles_top,
+                 s->nr_local_cells_with_particles, sizeof(int), 0, s);
 
   if (verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
             clocks_getunit());
 }
 
+void space_reorder_extra_parts_mapper(void *map_data, int num_cells,
+                                      void *extra_data) {
+
+  struct cell *cells_top = (struct cell *)map_data;
+  struct space *s = (struct space *)extra_data;
+
+  for (int ind = 0; ind < num_cells; ind++) {
+    struct cell *c = &cells_top[ind];
+    cell_reorder_extra_parts(c, c->hydro.parts - s->parts);
+  }
+}
+
+void space_reorder_extra_gparts_mapper(void *map_data, int num_cells,
+                                       void *extra_data) {
+
+  struct cell *cells_top = (struct cell *)map_data;
+  struct space *s = (struct space *)extra_data;
+
+  for (int ind = 0; ind < num_cells; ind++) {
+    struct cell *c = &cells_top[ind];
+    cell_reorder_extra_gparts(c, s->parts, s->sparts);
+  }
+}
+
+void space_reorder_extra_sparts_mapper(void *map_data, int num_cells,
+                                       void *extra_data) {
+
+  struct cell *cells_top = (struct cell *)map_data;
+  struct space *s = (struct space *)extra_data;
+
+  for (int ind = 0; ind < num_cells; ind++) {
+    struct cell *c = &cells_top[ind];
+    cell_reorder_extra_sparts(c, c->stars.parts - s->sparts);
+  }
+}
+
+/**
+ * @brief Re-orders the particles in each cell such that the extra particles
+ * for on-the-fly creation are located at the end of their respective cells.
+ *
+ * This assumes that all the particles (real and extra) have already been sorted
+ * in their correct top-level cell.
+ *
+ * @param s The #space to act upon.
+ * @param verbose Are we talkative?
+ */
+void space_reorder_extras(struct space *s, int verbose) {
+
+#ifdef WITH_MPI
+  if (space_extra_parts || space_extra_gparts || space_extra_sparts)
+    error("Need an MPI-proof version of this.");
+#endif
+
+  /* Re-order the gas particles */
+  if (space_extra_parts)
+    threadpool_map(&s->e->threadpool, space_reorder_extra_parts_mapper,
+                   s->cells_top, s->nr_cells, sizeof(struct cell), 0, s);
+
+  /* Re-order the gravity particles */
+  if (space_extra_gparts)
+    threadpool_map(&s->e->threadpool, space_reorder_extra_gparts_mapper,
+                   s->cells_top, s->nr_cells, sizeof(struct cell), 0, s);
+
+  /* Re-order the star particles */
+  if (space_extra_sparts)
+    threadpool_map(&s->e->threadpool, space_reorder_extra_sparts_mapper,
+                   s->cells_top, s->nr_cells, sizeof(struct cell), 0, s);
+}
+
 /**
  * @brief #threadpool mapper function to sanitize the cells
  *
@@ -1044,6 +1764,8 @@ void space_parts_get_cell_index_mapper(void *map_data, int nr_parts,
   /* Init the local collectors */
   float min_mass = FLT_MAX;
   float sum_vel_norm = 0.f;
+  size_t count_inhibited_part = 0;
+  size_t count_extra_part = 0;
 
   /* Loop over the parts. */
   for (int k = 0; k < nr_parts; k++) {
@@ -1055,6 +1777,17 @@ void space_parts_get_cell_index_mapper(void *map_data, int nr_parts,
     const double old_pos_y = p->x[1];
     const double old_pos_z = p->x[2];
 
+#ifdef SWIFT_DEBUG_CHECKS
+    if (!s->periodic) {
+      if (old_pos_x < 0. || old_pos_x > dim_x)
+        error("Particle outside of volume along X.");
+      if (old_pos_y < 0. || old_pos_y > dim_y)
+        error("Particle outside of volume along Y.");
+      if (old_pos_z < 0. || old_pos_z > dim_z)
+        error("Particle outside of volume along Z.");
+    }
+#endif
+
     /* Put it back into the simulation volume */
     const double pos_x = box_wrap(old_pos_x, 0.0, dim_x);
     const double pos_y = box_wrap(old_pos_y, 0.0, dim_y);
@@ -1075,19 +1808,31 @@ void space_parts_get_cell_index_mapper(void *map_data, int nr_parts,
             pos_z);
 #endif
 
-    ind[k] = index;
-    cell_counts[index]++;
+    if (p->time_bin == time_bin_inhibited) {
+      /* Is this particle to be removed? */
+      ind[k] = -1;
+      ++count_inhibited_part;
+    } else if (p->time_bin == time_bin_not_created) {
+      /* Is this a place-holder for on-the-fly creation? */
+      ind[k] = index;
+      cell_counts[index]++;
+      ++count_extra_part;
+    } else {
+      /* Normal case: list its top-level cell index */
+      ind[k] = index;
+      cell_counts[index]++;
 
-    /* Compute minimal mass */
-    min_mass = min(min_mass, hydro_get_mass(p));
+      /* Compute minimal mass */
+      min_mass = min(min_mass, hydro_get_mass(p));
 
-    /* Compute sum of velocity norm */
-    sum_vel_norm += p->v[0] * p->v[0] + p->v[1] * p->v[1] + p->v[2] * p->v[2];
+      /* Compute sum of velocity norm */
+      sum_vel_norm += p->v[0] * p->v[0] + p->v[1] * p->v[1] + p->v[2] * p->v[2];
 
-    /* Update the position */
-    p->x[0] = pos_x;
-    p->x[1] = pos_y;
-    p->x[2] = pos_z;
+      /* Update the position */
+      p->x[0] = pos_x;
+      p->x[1] = pos_y;
+      p->x[2] = pos_z;
+    }
   }
 
   /* Write the counts back to the global array. */
@@ -1095,6 +1840,11 @@ void space_parts_get_cell_index_mapper(void *map_data, int nr_parts,
     if (cell_counts[k]) atomic_add(&data->cell_counts[k], cell_counts[k]);
   free(cell_counts);
 
+  /* Write the count of inhibited and extra parts */
+  if (count_inhibited_part)
+    atomic_add(&data->count_inhibited_part, count_inhibited_part);
+  if (count_extra_part) atomic_add(&data->count_extra_part, count_extra_part);
+
   /* Write back the minimal part mass and velocity sum */
   atomic_min_f(&s->min_part_mass, min_mass);
   atomic_add_f(&s->sum_part_vel_norm, sum_vel_norm);
@@ -1133,6 +1883,8 @@ void space_gparts_get_cell_index_mapper(void *map_data, int nr_gparts,
   /* Init the local collectors */
   float min_mass = FLT_MAX;
   float sum_vel_norm = 0.f;
+  size_t count_inhibited_gpart = 0;
+  size_t count_extra_gpart = 0;
 
   for (int k = 0; k < nr_gparts; k++) {
 
@@ -1143,6 +1895,17 @@ void space_gparts_get_cell_index_mapper(void *map_data, int nr_gparts,
     const double old_pos_y = gp->x[1];
     const double old_pos_z = gp->x[2];
 
+#ifdef SWIFT_DEBUG_CHECKS
+    if (!s->periodic) {
+      if (old_pos_x < 0. || old_pos_x > dim_x)
+        error("Particle outside of volume along X.");
+      if (old_pos_y < 0. || old_pos_y > dim_y)
+        error("Particle outside of volume along Y.");
+      if (old_pos_z < 0. || old_pos_z > dim_z)
+        error("Particle outside of volume along Z.");
+    }
+#endif
+
     /* Put it back into the simulation volume */
     const double pos_x = box_wrap(old_pos_x, 0.0, dim_x);
     const double pos_y = box_wrap(old_pos_y, 0.0, dim_y);
@@ -1163,21 +1926,36 @@ void space_gparts_get_cell_index_mapper(void *map_data, int nr_gparts,
             pos_z);
 #endif
 
-    ind[k] = index;
-    cell_counts[index]++;
+    if (gp->time_bin == time_bin_inhibited) {
+      /* Is this particle to be removed? */
+      ind[k] = -1;
+      ++count_inhibited_gpart;
+    } else if (gp->time_bin == time_bin_not_created) {
+      /* Is this a place-holder for on-the-fly creation? */
+      ind[k] = index;
+      cell_counts[index]++;
+      ++count_extra_gpart;
+    } else {
+      /* List its top-level cell index */
+      ind[k] = index;
+      cell_counts[index]++;
 
-    /* Compute minimal mass */
-    if (gp->type == swift_type_dark_matter) {
-      min_mass = min(min_mass, gp->mass);
-      sum_vel_norm += gp->v_full[0] * gp->v_full[0] +
-                      gp->v_full[1] * gp->v_full[1] +
-                      gp->v_full[2] * gp->v_full[2];
-    }
+      if (gp->type == swift_type_dark_matter) {
+
+        /* Compute minimal mass */
+        min_mass = min(min_mass, gp->mass);
+
+        /* Compute sum of velocity norm */
+        sum_vel_norm += gp->v_full[0] * gp->v_full[0] +
+                        gp->v_full[1] * gp->v_full[1] +
+                        gp->v_full[2] * gp->v_full[2];
+      }
 
-    /* Update the position */
-    gp->x[0] = pos_x;
-    gp->x[1] = pos_y;
-    gp->x[2] = pos_z;
+      /* Update the position */
+      gp->x[0] = pos_x;
+      gp->x[1] = pos_y;
+      gp->x[2] = pos_z;
+    }
   }
 
   /* Write the counts back to the global array. */
@@ -1185,6 +1963,12 @@ void space_gparts_get_cell_index_mapper(void *map_data, int nr_gparts,
     if (cell_counts[k]) atomic_add(&data->cell_counts[k], cell_counts[k]);
   free(cell_counts);
 
+  /* Write the count of inhibited and extra gparts */
+  if (count_inhibited_gpart)
+    atomic_add(&data->count_inhibited_gpart, count_inhibited_gpart);
+  if (count_extra_gpart)
+    atomic_add(&data->count_extra_gpart, count_extra_gpart);
+
   /* Write back the minimal part mass and velocity sum */
   atomic_min_f(&s->min_gpart_mass, min_mass);
   atomic_add_f(&s->sum_gpart_vel_norm, sum_vel_norm);
@@ -1223,6 +2007,8 @@ void space_sparts_get_cell_index_mapper(void *map_data, int nr_sparts,
   /* Init the local collectors */
   float min_mass = FLT_MAX;
   float sum_vel_norm = 0.f;
+  size_t count_inhibited_spart = 0;
+  size_t count_extra_spart = 0;
 
   for (int k = 0; k < nr_sparts; k++) {
 
@@ -1233,6 +2019,17 @@ void space_sparts_get_cell_index_mapper(void *map_data, int nr_sparts,
     const double old_pos_y = sp->x[1];
     const double old_pos_z = sp->x[2];
 
+#ifdef SWIFT_DEBUG_CHECKS
+    if (!s->periodic) {
+      if (old_pos_x < 0. || old_pos_x > dim_x)
+        error("Particle outside of volume along X.");
+      if (old_pos_y < 0. || old_pos_y > dim_y)
+        error("Particle outside of volume along Y.");
+      if (old_pos_z < 0. || old_pos_z > dim_z)
+        error("Particle outside of volume along Z.");
+    }
+#endif
+
     /* Put it back into the simulation volume */
     const double pos_x = box_wrap(old_pos_x, 0.0, dim_x);
     const double pos_y = box_wrap(old_pos_y, 0.0, dim_y);
@@ -1253,20 +2050,32 @@ void space_sparts_get_cell_index_mapper(void *map_data, int nr_sparts,
             pos_z);
 #endif
 
-    ind[k] = index;
-    cell_counts[index]++;
+    /* Is this particle to be removed? */
+    if (sp->time_bin == time_bin_inhibited) {
+      ind[k] = -1;
+      ++count_inhibited_spart;
+    } else if (sp->time_bin == time_bin_not_created) {
+      /* Is this a place-holder for on-the-fly creation? */
+      ind[k] = index;
+      cell_counts[index]++;
+      ++count_extra_spart;
+    } else {
+      /* List its top-level cell index */
+      ind[k] = index;
+      cell_counts[index]++;
 
-    /* Compute minimal mass */
-    min_mass = min(min_mass, sp->mass);
+      /* Compute minimal mass */
+      min_mass = min(min_mass, sp->mass);
 
-    /* Compute sum of velocity norm */
-    sum_vel_norm +=
-        sp->v[0] * sp->v[0] + sp->v[1] * sp->v[1] + sp->v[2] * sp->v[2];
+      /* Compute sum of velocity norm */
+      sum_vel_norm +=
+          sp->v[0] * sp->v[0] + sp->v[1] * sp->v[1] + sp->v[2] * sp->v[2];
 
-    /* Update the position */
-    sp->x[0] = pos_x;
-    sp->x[1] = pos_y;
-    sp->x[2] = pos_z;
+      /* Update the position */
+      sp->x[0] = pos_x;
+      sp->x[1] = pos_y;
+      sp->x[2] = pos_z;
+    }
   }
 
   /* Write the counts back to the global array. */
@@ -1274,6 +2083,12 @@ void space_sparts_get_cell_index_mapper(void *map_data, int nr_sparts,
     if (cell_counts[k]) atomic_add(&data->cell_counts[k], cell_counts[k]);
   free(cell_counts);
 
+  /* Write the count of inhibited and extra sparts */
+  if (count_inhibited_spart)
+    atomic_add(&data->count_inhibited_spart, count_inhibited_spart);
+  if (count_extra_spart)
+    atomic_add(&data->count_extra_spart, count_extra_spart);
+
   /* Write back the minimal part mass and velocity sum */
   atomic_min_f(&s->min_spart_mass, min_mass);
   atomic_add_f(&s->sum_spart_vel_norm, sum_vel_norm);
@@ -1287,11 +2102,14 @@ void space_sparts_get_cell_index_mapper(void *map_data, int nr_sparts,
  * @param s The #space.
  * @param ind The array of indices to fill.
  * @param cell_counts The cell counters to update.
- * @param cells The array of #cell to update.
+ * @param count_inhibited_parts (return) The number of #part to remove.
+ * @param count_extra_parts (return) The number of #part for on-the-fly
+ * creation.
  * @param verbose Are we talkative ?
  */
 void space_parts_get_cell_index(struct space *s, int *ind, int *cell_counts,
-                                struct cell *cells, int verbose) {
+                                size_t *count_inhibited_parts,
+                                size_t *count_extra_parts, int verbose) {
 
   const ticks tic = getticks();
 
@@ -1302,13 +2120,21 @@ void space_parts_get_cell_index(struct space *s, int *ind, int *cell_counts,
   /* Pack the extra information */
   struct index_data data;
   data.s = s;
-  data.cells = cells;
   data.ind = ind;
   data.cell_counts = cell_counts;
+  data.count_inhibited_part = 0;
+  data.count_inhibited_gpart = 0;
+  data.count_inhibited_spart = 0;
+  data.count_extra_part = 0;
+  data.count_extra_gpart = 0;
+  data.count_extra_spart = 0;
 
   threadpool_map(&s->e->threadpool, space_parts_get_cell_index_mapper, s->parts,
                  s->nr_parts, sizeof(struct part), 0, &data);
 
+  *count_inhibited_parts = data.count_inhibited_part;
+  *count_extra_parts = data.count_extra_part;
+
   if (verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
             clocks_getunit());
@@ -1322,11 +2148,14 @@ void space_parts_get_cell_index(struct space *s, int *ind, int *cell_counts,
  * @param s The #space.
  * @param gind The array of indices to fill.
  * @param cell_counts The cell counters to update.
- * @param cells The array of #cell to update.
+ * @param count_inhibited_gparts (return) The number of #gpart to remove.
+ * @param count_extra_gparts (return) The number of #gpart for on-the-fly
+ * creation.
  * @param verbose Are we talkative ?
  */
 void space_gparts_get_cell_index(struct space *s, int *gind, int *cell_counts,
-                                 struct cell *cells, int verbose) {
+                                 size_t *count_inhibited_gparts,
+                                 size_t *count_extra_gparts, int verbose) {
 
   const ticks tic = getticks();
 
@@ -1337,13 +2166,21 @@ void space_gparts_get_cell_index(struct space *s, int *gind, int *cell_counts,
   /* Pack the extra information */
   struct index_data data;
   data.s = s;
-  data.cells = cells;
   data.ind = gind;
   data.cell_counts = cell_counts;
+  data.count_inhibited_part = 0;
+  data.count_inhibited_gpart = 0;
+  data.count_inhibited_spart = 0;
+  data.count_extra_part = 0;
+  data.count_extra_gpart = 0;
+  data.count_extra_spart = 0;
 
   threadpool_map(&s->e->threadpool, space_gparts_get_cell_index_mapper,
                  s->gparts, s->nr_gparts, sizeof(struct gpart), 0, &data);
 
+  *count_inhibited_gparts = data.count_inhibited_gpart;
+  *count_extra_gparts = data.count_extra_gpart;
+
   if (verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
             clocks_getunit());
@@ -1357,11 +2194,14 @@ void space_gparts_get_cell_index(struct space *s, int *gind, int *cell_counts,
  * @param s The #space.
  * @param sind The array of indices to fill.
  * @param cell_counts The cell counters to update.
- * @param cells The array of #cell to update.
+ * @param count_inhibited_sparts (return) The number of #spart to remove.
+ * @param count_extra_sparts (return) The number of #spart for on-the-fly
+ * creation.
  * @param verbose Are we talkative ?
  */
 void space_sparts_get_cell_index(struct space *s, int *sind, int *cell_counts,
-                                 struct cell *cells, int verbose) {
+                                 size_t *count_inhibited_sparts,
+                                 size_t *count_extra_sparts, int verbose) {
 
   const ticks tic = getticks();
 
@@ -1372,13 +2212,21 @@ void space_sparts_get_cell_index(struct space *s, int *sind, int *cell_counts,
   /* Pack the extra information */
   struct index_data data;
   data.s = s;
-  data.cells = cells;
   data.ind = sind;
   data.cell_counts = cell_counts;
+  data.count_inhibited_part = 0;
+  data.count_inhibited_gpart = 0;
+  data.count_inhibited_spart = 0;
+  data.count_extra_part = 0;
+  data.count_extra_gpart = 0;
+  data.count_extra_spart = 0;
 
   threadpool_map(&s->e->threadpool, space_sparts_get_cell_index_mapper,
                  s->sparts, s->nr_sparts, sizeof(struct spart), 0, &data);
 
+  *count_inhibited_sparts = data.count_inhibited_spart;
+  *count_extra_sparts = data.count_extra_spart;
+
   if (verbose)
     message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
             clocks_getunit());
@@ -1395,8 +2243,9 @@ void space_sparts_get_cell_index(struct space *s, int *sind, int *cell_counts,
  * @param num_bins Total number of bins (length of count).
  * @param parts_offset Offset of the #part array from the global #part array.
  */
-void space_parts_sort(struct part *parts, struct xpart *xparts, int *ind,
-                      int *counts, int num_bins, ptrdiff_t parts_offset) {
+void space_parts_sort(struct part *parts, struct xpart *xparts,
+                      int *restrict ind, int *restrict counts, int num_bins,
+                      ptrdiff_t parts_offset) {
   /* Create the offsets array. */
   size_t *offsets = NULL;
   if (posix_memalign((void **)&offsets, SWIFT_STRUCT_ALIGNMENT,
@@ -1457,8 +2306,9 @@ void space_parts_sort(struct part *parts, struct xpart *xparts, int *ind,
  * @param sparts_offset Offset of the #spart array from the global #spart.
  * array.
  */
-void space_sparts_sort(struct spart *sparts, int *ind, int *counts,
-                       int num_bins, ptrdiff_t sparts_offset) {
+void space_sparts_sort(struct spart *sparts, int *restrict ind,
+                       int *restrict counts, int num_bins,
+                       ptrdiff_t sparts_offset) {
   /* Create the offsets array. */
   size_t *offsets = NULL;
   if (posix_memalign((void **)&offsets, SWIFT_STRUCT_ALIGNMENT,
@@ -1517,8 +2367,8 @@ void space_sparts_sort(struct spart *sparts, int *ind, int *counts,
  * @param num_bins Total number of bins (length of counts).
  */
 void space_gparts_sort(struct gpart *gparts, struct part *parts,
-                       struct spart *sparts, int *ind, int *counts,
-                       int num_bins) {
+                       struct spart *sparts, int *restrict ind,
+                       int *restrict counts, int num_bins) {
   /* Create the offsets array. */
   size_t *offsets = NULL;
   if (posix_memalign((void **)&offsets, SWIFT_STRUCT_ALIGNMENT,
@@ -1549,7 +2399,7 @@ void space_gparts_sort(struct gpart *gparts, struct part *parts,
         memswap(&ind[j], &target_cid, sizeof(int));
         if (gparts[j].type == swift_type_gas) {
           parts[-gparts[j].id_or_neg_offset].gpart = &gparts[j];
-        } else if (gparts[j].type == swift_type_star) {
+        } else if (gparts[j].type == swift_type_stars) {
           sparts[-gparts[j].id_or_neg_offset].gpart = &gparts[j];
         }
       }
@@ -1557,7 +2407,7 @@ void space_gparts_sort(struct gpart *gparts, struct part *parts,
       ind[k] = target_cid;
       if (gparts[k].type == swift_type_gas) {
         parts[-gparts[k].id_or_neg_offset].gpart = &gparts[k];
-      } else if (gparts[k].type == swift_type_star) {
+      } else if (gparts[k].type == swift_type_stars) {
         sparts[-gparts[k].id_or_neg_offset].gpart = &gparts[k];
       }
     }
@@ -1577,11 +2427,16 @@ void space_gparts_sort(struct gpart *gparts, struct part *parts,
  */
 void space_map_clearsort(struct cell *c, void *data) {
 
-  for (int i = 0; i < 13; i++)
-    if (c->sort[i] != NULL) {
-      free(c->sort[i]);
-      c->sort[i] = NULL;
+  for (int i = 0; i < 13; i++) {
+    if (c->hydro.sort[i] != NULL) {
+      free(c->hydro.sort[i]);
+      c->hydro.sort[i] = NULL;
+    }
+    if (c->stars.sort[i] != NULL) {
+      free(c->stars.sort[i]);
+      c->stars.sort[i] = NULL;
     }
+  }
 }
 
 /**
@@ -1597,7 +2452,7 @@ static void rec_map_parts(struct cell *c,
                           void *data) {
   /* No progeny? */
   if (!c->split)
-    for (int k = 0; k < c->count; k++) fun(&c->parts[k], c, data);
+    for (int k = 0; k < c->hydro.count; k++) fun(&c->hydro.parts[k], c, data);
 
   /* Otherwise, recurse. */
   else
@@ -1633,7 +2488,8 @@ static void rec_map_parts_xparts(struct cell *c,
 
   /* No progeny? */
   if (!c->split)
-    for (int k = 0; k < c->count; k++) fun(&c->parts[k], &c->xparts[k], c);
+    for (int k = 0; k < c->hydro.count; k++)
+      fun(&c->hydro.parts[k], &c->hydro.xparts[k], c);
 
   /* Otherwise, recurse. */
   else
@@ -1729,31 +2585,34 @@ void space_map_cells_pre(struct space *s, int full,
  * @param s The #space in which the cell lives.
  * @param c The #cell to split recursively.
  * @param buff A buffer for particle sorting, should be of size at least
- *        c->count or @c NULL.
+ *        c->hydro.count or @c NULL.
  * @param sbuff A buffer for particle sorting, should be of size at least
- *        c->scount or @c NULL.
+ *        c->stars.count or @c NULL.
  * @param gbuff A buffer for particle sorting, should be of size at least
- *        c->gcount or @c NULL.
+ *        c->grav.count or @c NULL.
  */
 void space_split_recursive(struct space *s, struct cell *c,
                            struct cell_buff *buff, struct cell_buff *sbuff,
                            struct cell_buff *gbuff) {
 
-  const int count = c->count;
-  const int gcount = c->gcount;
-  const int scount = c->scount;
-  const int with_gravity = s->gravity;
+  const int count = c->hydro.count;
+  const int gcount = c->grav.count;
+  const int scount = c->stars.count;
+  const int with_self_gravity = s->with_self_gravity;
   const int depth = c->depth;
   int maxdepth = 0;
   float h_max = 0.0f;
+  float stars_h_max = 0.f;
   integertime_t ti_hydro_end_min = max_nr_timesteps, ti_hydro_end_max = 0,
                 ti_hydro_beg_max = 0;
   integertime_t ti_gravity_end_min = max_nr_timesteps, ti_gravity_end_max = 0,
                 ti_gravity_beg_max = 0;
-  struct part *parts = c->parts;
-  struct gpart *gparts = c->gparts;
-  struct spart *sparts = c->sparts;
-  struct xpart *xparts = c->xparts;
+  integertime_t ti_stars_end_min = max_nr_timesteps, ti_stars_end_max = 0,
+                ti_stars_beg_max = 0;
+  struct part *parts = c->hydro.parts;
+  struct gpart *gparts = c->grav.parts;
+  struct spart *sparts = c->stars.parts;
+  struct xpart *xparts = c->hydro.xparts;
   struct engine *e = s->e;
   const integertime_t ti_current = e->ti_current;
 
@@ -1765,6 +2624,12 @@ void space_split_recursive(struct space *s, struct cell *c,
                          sizeof(struct cell_buff) * count) != 0)
         error("Failed to allocate temporary indices.");
       for (int k = 0; k < count; k++) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (parts[k].time_bin == time_bin_inhibited)
+          error("Inhibited particle present in space_split()");
+        if (parts[k].time_bin == time_bin_not_created)
+          error("Extra particle present in space_split()");
+#endif
         buff[k].x[0] = parts[k].x[0];
         buff[k].x[1] = parts[k].x[1];
         buff[k].x[2] = parts[k].x[2];
@@ -1775,6 +2640,12 @@ void space_split_recursive(struct space *s, struct cell *c,
                          sizeof(struct cell_buff) * gcount) != 0)
         error("Failed to allocate temporary indices.");
       for (int k = 0; k < gcount; k++) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (gparts[k].time_bin == time_bin_inhibited)
+          error("Inhibited particle present in space_split()");
+        if (gparts[k].time_bin == time_bin_not_created)
+          error("Extra particle present in space_split()");
+#endif
         gbuff[k].x[0] = gparts[k].x[0];
         gbuff[k].x[1] = gparts[k].x[1];
         gbuff[k].x[2] = gparts[k].x[2];
@@ -1785,6 +2656,12 @@ void space_split_recursive(struct space *s, struct cell *c,
                          sizeof(struct cell_buff) * scount) != 0)
         error("Failed to allocate temporary indices.");
       for (int k = 0; k < scount; k++) {
+#ifdef SWIFT_DEBUG_CHECKS
+        if (sparts[k].time_bin == time_bin_inhibited)
+          error("Inhibited particle present in space_split()");
+        if (sparts[k].time_bin == time_bin_not_created)
+          error("Extra particle present in space_split()");
+#endif
         sbuff[k].x[0] = sparts[k].x[0];
         sbuff[k].x[1] = sparts[k].x[1];
         sbuff[k].x[2] = sparts[k].x[2];
@@ -1804,8 +2681,8 @@ void space_split_recursive(struct space *s, struct cell *c,
   }
 
   /* Split or let it be? */
-  if ((with_gravity && gcount > space_splitsize) ||
-      (!with_gravity &&
+  if ((with_self_gravity && gcount > space_splitsize) ||
+      (!with_self_gravity &&
        (count > space_splitsize || scount > space_splitsize))) {
 
     /* No longer just a leaf. */
@@ -1815,12 +2692,16 @@ void space_split_recursive(struct space *s, struct cell *c,
     space_getcells(s, 8, c->progeny);
     for (int k = 0; k < 8; k++) {
       struct cell *cp = c->progeny[k];
-      cp->count = 0;
-      cp->gcount = 0;
-      cp->scount = 0;
-      cp->ti_old_part = c->ti_old_part;
-      cp->ti_old_gpart = c->ti_old_gpart;
-      cp->ti_old_multipole = c->ti_old_multipole;
+      cp->hydro.count = 0;
+      cp->grav.count = 0;
+      cp->stars.count = 0;
+      cp->hydro.count_total = 0;
+      cp->grav.count_total = 0;
+      cp->stars.count_total = 0;
+      cp->hydro.ti_old_part = c->hydro.ti_old_part;
+      cp->grav.ti_old_part = c->grav.ti_old_part;
+      cp->grav.ti_old_multipole = c->grav.ti_old_multipole;
+      cp->stars.ti_old_part = c->stars.ti_old_part;
       cp->loc[0] = c->loc[0];
       cp->loc[1] = c->loc[1];
       cp->loc[2] = c->loc[2];
@@ -1833,19 +2714,26 @@ void space_split_recursive(struct space *s, struct cell *c,
       if (k & 1) cp->loc[2] += cp->width[2];
       cp->depth = c->depth + 1;
       cp->split = 0;
-      cp->h_max = 0.f;
-      cp->dx_max_part = 0.f;
-      cp->dx_max_sort = 0.f;
+      cp->hydro.h_max = 0.f;
+      cp->hydro.dx_max_part = 0.f;
+      cp->hydro.dx_max_sort = 0.f;
+      cp->stars.h_max = 0.f;
+      cp->stars.dx_max_part = 0.f;
+      cp->stars.dx_max_sort = 0.f;
       cp->nodeID = c->nodeID;
       cp->parent = c;
       cp->super = NULL;
-      cp->super_hydro = NULL;
-      cp->super_gravity = NULL;
-      cp->do_sub_sort = 0;
-      cp->do_grav_sub_drift = 0;
-      cp->do_sub_drift = 0;
+      cp->hydro.super = NULL;
+      cp->grav.super = NULL;
+      cp->hydro.do_sub_sort = 0;
+      cp->stars.do_sub_sort = 0;
+      cp->grav.do_sub_drift = 0;
+      cp->hydro.do_sub_drift = 0;
+      cp->stars.do_sub_drift = 0;
+      cp->hydro.do_sub_limiter = 0;
+      cp->hydro.do_limiter = 0;
 #ifdef WITH_MPI
-      cp->tag = -1;
+      cp->mpi.tag = -1;
 #endif  // WITH_MPI
 #ifdef SWIFT_DEBUG_CHECKS
       cp->cellID = last_cell_id++;
@@ -1853,8 +2741,8 @@ void space_split_recursive(struct space *s, struct cell *c,
     }
 
     /* Split the cell's partcle data. */
-    cell_split(c, c->parts - s->parts, c->sparts - s->sparts, buff, sbuff,
-               gbuff);
+    cell_split(c, c->hydro.parts - s->parts, c->stars.parts - s->sparts, buff,
+               sbuff, gbuff);
 
     /* Buffers for the progenitors */
     struct cell_buff *progeny_buff = buff, *progeny_gbuff = gbuff,
@@ -1866,7 +2754,7 @@ void space_split_recursive(struct space *s, struct cell *c,
       struct cell *cp = c->progeny[k];
 
       /* Remove any progeny with zero particles. */
-      if (cp->count == 0 && cp->gcount == 0 && cp->scount == 0) {
+      if (cp->hydro.count == 0 && cp->grav.count == 0 && cp->stars.count == 0) {
 
         space_recycle(s, cp);
         c->progeny[k] = NULL;
@@ -1878,18 +2766,22 @@ void space_split_recursive(struct space *s, struct cell *c,
                               progeny_gbuff);
 
         /* Update the pointers in the buffers */
-        progeny_buff += cp->count;
-        progeny_gbuff += cp->gcount;
-        progeny_sbuff += cp->scount;
+        progeny_buff += cp->hydro.count;
+        progeny_gbuff += cp->grav.count;
+        progeny_sbuff += cp->stars.count;
 
         /* Update the cell-wide properties */
-        h_max = max(h_max, cp->h_max);
-        ti_hydro_end_min = min(ti_hydro_end_min, cp->ti_hydro_end_min);
-        ti_hydro_end_max = max(ti_hydro_end_max, cp->ti_hydro_end_max);
-        ti_hydro_beg_max = max(ti_hydro_beg_max, cp->ti_hydro_beg_max);
-        ti_gravity_end_min = min(ti_gravity_end_min, cp->ti_gravity_end_min);
-        ti_gravity_end_max = max(ti_gravity_end_max, cp->ti_gravity_end_max);
-        ti_gravity_beg_max = max(ti_gravity_beg_max, cp->ti_gravity_beg_max);
+        h_max = max(h_max, cp->hydro.h_max);
+        stars_h_max = max(stars_h_max, cp->stars.h_max);
+        ti_hydro_end_min = min(ti_hydro_end_min, cp->hydro.ti_end_min);
+        ti_hydro_end_max = max(ti_hydro_end_max, cp->hydro.ti_end_max);
+        ti_hydro_beg_max = max(ti_hydro_beg_max, cp->hydro.ti_beg_max);
+        ti_gravity_end_min = min(ti_gravity_end_min, cp->grav.ti_end_min);
+        ti_gravity_end_max = max(ti_gravity_end_max, cp->grav.ti_end_max);
+        ti_gravity_beg_max = max(ti_gravity_beg_max, cp->grav.ti_beg_max);
+        ti_stars_end_min = min(ti_stars_end_min, cp->stars.ti_end_min);
+        ti_stars_end_max = max(ti_stars_end_max, cp->stars.ti_end_max);
+        ti_stars_beg_max = max(ti_stars_beg_max, cp->stars.ti_beg_max);
 
         /* Increase the depth */
         if (cp->maxdepth > maxdepth) maxdepth = cp->maxdepth;
@@ -1897,10 +2789,10 @@ void space_split_recursive(struct space *s, struct cell *c,
     }
 
     /* Deal with the multipole */
-    if (s->gravity) {
+    if (s->with_self_gravity) {
 
       /* Reset everything */
-      gravity_reset(c->multipole);
+      gravity_reset(c->grav.multipole);
 
       /* Compute CoM and bulk velocity from all progenies */
       double CoM[3] = {0., 0., 0.};
@@ -1911,7 +2803,7 @@ void space_split_recursive(struct space *s, struct cell *c,
 
       for (int k = 0; k < 8; ++k) {
         if (c->progeny[k] != NULL) {
-          const struct gravity_tensors *m = c->progeny[k]->multipole;
+          const struct gravity_tensors *m = c->progeny[k]->grav.multipole;
 
           mass += m->m_pole.M_000;
 
@@ -1935,20 +2827,20 @@ void space_split_recursive(struct space *s, struct cell *c,
 
       /* Final operation on the CoM and bulk velocity */
       const double inv_mass = 1. / mass;
-      c->multipole->CoM[0] = CoM[0] * inv_mass;
-      c->multipole->CoM[1] = CoM[1] * inv_mass;
-      c->multipole->CoM[2] = CoM[2] * inv_mass;
-      c->multipole->m_pole.vel[0] = vel[0] * inv_mass;
-      c->multipole->m_pole.vel[1] = vel[1] * inv_mass;
-      c->multipole->m_pole.vel[2] = vel[2] * inv_mass;
+      c->grav.multipole->CoM[0] = CoM[0] * inv_mass;
+      c->grav.multipole->CoM[1] = CoM[1] * inv_mass;
+      c->grav.multipole->CoM[2] = CoM[2] * inv_mass;
+      c->grav.multipole->m_pole.vel[0] = vel[0] * inv_mass;
+      c->grav.multipole->m_pole.vel[1] = vel[1] * inv_mass;
+      c->grav.multipole->m_pole.vel[2] = vel[2] * inv_mass;
 
       /* Min max velocity along each axis */
-      c->multipole->m_pole.max_delta_vel[0] = max_delta_vel[0];
-      c->multipole->m_pole.max_delta_vel[1] = max_delta_vel[1];
-      c->multipole->m_pole.max_delta_vel[2] = max_delta_vel[2];
-      c->multipole->m_pole.min_delta_vel[0] = min_delta_vel[0];
-      c->multipole->m_pole.min_delta_vel[1] = min_delta_vel[1];
-      c->multipole->m_pole.min_delta_vel[2] = min_delta_vel[2];
+      c->grav.multipole->m_pole.max_delta_vel[0] = max_delta_vel[0];
+      c->grav.multipole->m_pole.max_delta_vel[1] = max_delta_vel[1];
+      c->grav.multipole->m_pole.max_delta_vel[2] = max_delta_vel[2];
+      c->grav.multipole->m_pole.min_delta_vel[0] = min_delta_vel[0];
+      c->grav.multipole->m_pole.min_delta_vel[1] = min_delta_vel[1];
+      c->grav.multipole->m_pole.min_delta_vel[2] = min_delta_vel[2];
 
       /* Now shift progeny multipoles and add them up */
       struct multipole temp;
@@ -1956,45 +2848,52 @@ void space_split_recursive(struct space *s, struct cell *c,
       for (int k = 0; k < 8; ++k) {
         if (c->progeny[k] != NULL) {
           const struct cell *cp = c->progeny[k];
-          const struct multipole *m = &cp->multipole->m_pole;
+          const struct multipole *m = &cp->grav.multipole->m_pole;
 
           /* Contribution to multipole */
-          gravity_M2M(&temp, m, c->multipole->CoM, cp->multipole->CoM);
-          gravity_multipole_add(&c->multipole->m_pole, &temp);
+          gravity_M2M(&temp, m, c->grav.multipole->CoM,
+                      cp->grav.multipole->CoM);
+          gravity_multipole_add(&c->grav.multipole->m_pole, &temp);
 
           /* Upper limit of max CoM<->gpart distance */
-          const double dx = c->multipole->CoM[0] - cp->multipole->CoM[0];
-          const double dy = c->multipole->CoM[1] - cp->multipole->CoM[1];
-          const double dz = c->multipole->CoM[2] - cp->multipole->CoM[2];
+          const double dx =
+              c->grav.multipole->CoM[0] - cp->grav.multipole->CoM[0];
+          const double dy =
+              c->grav.multipole->CoM[1] - cp->grav.multipole->CoM[1];
+          const double dz =
+              c->grav.multipole->CoM[2] - cp->grav.multipole->CoM[2];
           const double r2 = dx * dx + dy * dy + dz * dz;
-          r_max = max(r_max, cp->multipole->r_max + sqrt(r2));
+          r_max = max(r_max, cp->grav.multipole->r_max + sqrt(r2));
         }
       }
 
       /* Alternative upper limit of max CoM<->gpart distance */
-      const double dx = c->multipole->CoM[0] > c->loc[0] + c->width[0] / 2.
-                            ? c->multipole->CoM[0] - c->loc[0]
-                            : c->loc[0] + c->width[0] - c->multipole->CoM[0];
-      const double dy = c->multipole->CoM[1] > c->loc[1] + c->width[1] / 2.
-                            ? c->multipole->CoM[1] - c->loc[1]
-                            : c->loc[1] + c->width[1] - c->multipole->CoM[1];
-      const double dz = c->multipole->CoM[2] > c->loc[2] + c->width[2] / 2.
-                            ? c->multipole->CoM[2] - c->loc[2]
-                            : c->loc[2] + c->width[2] - c->multipole->CoM[2];
+      const double dx =
+          c->grav.multipole->CoM[0] > c->loc[0] + c->width[0] / 2.
+              ? c->grav.multipole->CoM[0] - c->loc[0]
+              : c->loc[0] + c->width[0] - c->grav.multipole->CoM[0];
+      const double dy =
+          c->grav.multipole->CoM[1] > c->loc[1] + c->width[1] / 2.
+              ? c->grav.multipole->CoM[1] - c->loc[1]
+              : c->loc[1] + c->width[1] - c->grav.multipole->CoM[1];
+      const double dz =
+          c->grav.multipole->CoM[2] > c->loc[2] + c->width[2] / 2.
+              ? c->grav.multipole->CoM[2] - c->loc[2]
+              : c->loc[2] + c->width[2] - c->grav.multipole->CoM[2];
 
       /* Take minimum of both limits */
-      c->multipole->r_max = min(r_max, sqrt(dx * dx + dy * dy + dz * dz));
+      c->grav.multipole->r_max = min(r_max, sqrt(dx * dx + dy * dy + dz * dz));
 
       /* Store the value at rebuild time */
-      c->multipole->r_max_rebuild = c->multipole->r_max;
-      c->multipole->CoM_rebuild[0] = c->multipole->CoM[0];
-      c->multipole->CoM_rebuild[1] = c->multipole->CoM[1];
-      c->multipole->CoM_rebuild[2] = c->multipole->CoM[2];
+      c->grav.multipole->r_max_rebuild = c->grav.multipole->r_max;
+      c->grav.multipole->CoM_rebuild[0] = c->grav.multipole->CoM[0];
+      c->grav.multipole->CoM_rebuild[1] = c->grav.multipole->CoM[1];
+      c->grav.multipole->CoM_rebuild[2] = c->grav.multipole->CoM[2];
 
       /* We know the first-order multipole (dipole) is 0. */
-      c->multipole->m_pole.M_100 = 0.f;
-      c->multipole->m_pole.M_010 = 0.f;
-      c->multipole->m_pole.M_001 = 0.f;
+      c->grav.multipole->m_pole.M_100 = 0.f;
+      c->grav.multipole->m_pole.M_010 = 0.f;
+      c->grav.multipole->m_pole.M_001 = 0.f;
 
     } /* Deal with gravity */
   }   /* Split or let it be? */
@@ -2009,10 +2908,13 @@ void space_split_recursive(struct space *s, struct cell *c,
 
     timebin_t hydro_time_bin_min = num_time_bins, hydro_time_bin_max = 0;
     timebin_t gravity_time_bin_min = num_time_bins, gravity_time_bin_max = 0;
+    timebin_t stars_time_bin_min = num_time_bins;
 
     /* parts: Get dt_min/dt_max and h_max. */
     for (int k = 0; k < count; k++) {
 #ifdef SWIFT_DEBUG_CHECKS
+      if (parts[k].time_bin == time_bin_not_created)
+        error("Extra particle present in space_split()");
       if (parts[k].time_bin == time_bin_inhibited)
         error("Inhibited particle present in space_split()");
 #endif
@@ -2031,6 +2933,8 @@ void space_split_recursive(struct space *s, struct cell *c,
     /* gparts: Get dt_min/dt_max. */
     for (int k = 0; k < gcount; k++) {
 #ifdef SWIFT_DEBUG_CHECKS
+      if (gparts[k].time_bin == time_bin_not_created)
+        error("Extra g-particle present in space_split()");
       if (gparts[k].time_bin == time_bin_inhibited)
         error("Inhibited g-particle present in space_split()");
 #endif
@@ -2041,11 +2945,21 @@ void space_split_recursive(struct space *s, struct cell *c,
     /* sparts: Get dt_min/dt_max */
     for (int k = 0; k < scount; k++) {
 #ifdef SWIFT_DEBUG_CHECKS
+      if (sparts[k].time_bin == time_bin_not_created)
+        error("Extra s-particle present in space_split()");
       if (sparts[k].time_bin == time_bin_inhibited)
         error("Inhibited s-particle present in space_split()");
 #endif
       gravity_time_bin_min = min(gravity_time_bin_min, sparts[k].time_bin);
       gravity_time_bin_max = max(gravity_time_bin_max, sparts[k].time_bin);
+      stars_time_bin_min = min(stars_time_bin_min, sparts[k].time_bin);
+
+      stars_h_max = max(stars_h_max, sparts[k].h);
+
+      /* Reset x_diff */
+      sparts[k].x_diff[0] = 0.f;
+      sparts[k].x_diff[1] = 0.f;
+      sparts[k].x_diff[2] = 0.f;
     }
 
     /* Convert into integer times */
@@ -2057,55 +2971,60 @@ void space_split_recursive(struct space *s, struct cell *c,
     ti_gravity_end_max = get_integer_time_end(ti_current, gravity_time_bin_max);
     ti_gravity_beg_max =
         get_integer_time_begin(ti_current + 1, gravity_time_bin_max);
+    ti_stars_end_min = get_integer_time_end(ti_current, stars_time_bin_min);
 
     /* Construct the multipole and the centre of mass*/
-    if (s->gravity) {
+    if (s->with_self_gravity) {
       if (gcount > 0) {
 
-        gravity_P2M(c->multipole, c->gparts, c->gcount);
+        gravity_P2M(c->grav.multipole, c->grav.parts, c->grav.count);
 
       } else {
 
         /* No gparts in that leaf cell */
 
         /* Set the values to something sensible */
-        gravity_multipole_init(&c->multipole->m_pole);
+        gravity_multipole_init(&c->grav.multipole->m_pole);
         if (c->nodeID == engine_rank) {
-          c->multipole->CoM[0] = c->loc[0] + c->width[0] / 2.;
-          c->multipole->CoM[1] = c->loc[1] + c->width[1] / 2.;
-          c->multipole->CoM[2] = c->loc[2] + c->width[2] / 2.;
-          c->multipole->r_max = 0.;
+          c->grav.multipole->CoM[0] = c->loc[0] + c->width[0] / 2.;
+          c->grav.multipole->CoM[1] = c->loc[1] + c->width[1] / 2.;
+          c->grav.multipole->CoM[2] = c->loc[2] + c->width[2] / 2.;
+          c->grav.multipole->r_max = 0.;
         }
       }
 
       /* Store the value at rebuild time */
-      c->multipole->r_max_rebuild = c->multipole->r_max;
-      c->multipole->CoM_rebuild[0] = c->multipole->CoM[0];
-      c->multipole->CoM_rebuild[1] = c->multipole->CoM[1];
-      c->multipole->CoM_rebuild[2] = c->multipole->CoM[2];
+      c->grav.multipole->r_max_rebuild = c->grav.multipole->r_max;
+      c->grav.multipole->CoM_rebuild[0] = c->grav.multipole->CoM[0];
+      c->grav.multipole->CoM_rebuild[1] = c->grav.multipole->CoM[1];
+      c->grav.multipole->CoM_rebuild[2] = c->grav.multipole->CoM[2];
     }
   }
 
   /* Set the values for this cell. */
-  c->h_max = h_max;
-  c->ti_hydro_end_min = ti_hydro_end_min;
-  c->ti_hydro_end_max = ti_hydro_end_max;
-  c->ti_hydro_beg_max = ti_hydro_beg_max;
-  c->ti_gravity_end_min = ti_gravity_end_min;
-  c->ti_gravity_end_max = ti_gravity_end_max;
-  c->ti_gravity_beg_max = ti_gravity_beg_max;
+  c->hydro.h_max = h_max;
+  c->hydro.ti_end_min = ti_hydro_end_min;
+  c->hydro.ti_end_max = ti_hydro_end_max;
+  c->hydro.ti_beg_max = ti_hydro_beg_max;
+  c->grav.ti_end_min = ti_gravity_end_min;
+  c->grav.ti_end_max = ti_gravity_end_max;
+  c->grav.ti_beg_max = ti_gravity_beg_max;
+  c->stars.ti_end_min = ti_stars_end_min;
+  c->stars.ti_end_max = ti_stars_end_max;
+  c->stars.ti_beg_max = ti_stars_beg_max;
+  c->stars.h_max = stars_h_max;
   c->maxdepth = maxdepth;
 
   /* Set ownership according to the start of the parts array. */
   if (s->nr_parts > 0)
-    c->owner =
-        ((c->parts - s->parts) % s->nr_parts) * s->nr_queues / s->nr_parts;
+    c->owner = ((c->hydro.parts - s->parts) % s->nr_parts) * s->nr_queues /
+               s->nr_parts;
   else if (s->nr_sparts > 0)
-    c->owner =
-        ((c->sparts - s->sparts) % s->nr_sparts) * s->nr_queues / s->nr_sparts;
+    c->owner = ((c->stars.parts - s->sparts) % s->nr_sparts) * s->nr_queues /
+               s->nr_sparts;
   else if (s->nr_gparts > 0)
-    c->owner =
-        ((c->gparts - s->gparts) % s->nr_gparts) * s->nr_queues / s->nr_gparts;
+    c->owner = ((c->grav.parts - s->gparts) % s->nr_gparts) * s->nr_queues /
+               s->nr_gparts;
   else
     c->owner = 0; /* Ok, there is really nothing on this rank... */
 
@@ -2129,10 +3048,12 @@ void space_split_mapper(void *map_data, int num_cells, void *extra_data) {
 
   /* Unpack the inputs. */
   struct space *s = (struct space *)extra_data;
-  struct cell *restrict cells_top = (struct cell *)map_data;
+  struct cell *cells_top = s->cells_top;
+  int *local_cells_with_particles = (int *)map_data;
 
+  /* Loop over the non-empty cells */
   for (int ind = 0; ind < num_cells; ind++) {
-    struct cell *c = &cells_top[ind];
+    struct cell *c = &cells_top[local_cells_with_particles[ind]];
     space_split_recursive(s, c, NULL, NULL, NULL);
   }
 
@@ -2140,8 +3061,8 @@ void space_split_mapper(void *map_data, int num_cells, void *extra_data) {
   /* All cells and particles should have consistent h_max values. */
   for (int ind = 0; ind < num_cells; ind++) {
     int depth = 0;
-    if (!checkCellhdxmax(&cells_top[ind], &depth))
-      message("    at cell depth %d", depth);
+    const struct cell *c = &cells_top[local_cells_with_particles[ind]];
+    if (!checkCellhdxmax(c, &depth)) message("    at cell depth %d", depth);
   }
 #endif
 }
@@ -2155,17 +3076,17 @@ void space_split_mapper(void *map_data, int num_cells, void *extra_data) {
 void space_recycle(struct space *s, struct cell *c) {
 
   /* Clear the cell. */
-  if (lock_destroy(&c->lock) != 0 || lock_destroy(&c->glock) != 0 ||
-      lock_destroy(&c->mlock) != 0 || lock_destroy(&c->slock) != 0)
+  if (lock_destroy(&c->lock) != 0 || lock_destroy(&c->grav.plock) != 0 ||
+      lock_destroy(&c->mlock) != 0 || lock_destroy(&c->stars.lock) != 0)
     error("Failed to destroy spinlocks.");
 
   /* Lock the space. */
   lock_lock(&s->lock);
 
   /* Hook the multipole back in the buffer */
-  if (s->gravity) {
-    c->multipole->next = s->multipoles_sub;
-    s->multipoles_sub = c->multipole;
+  if (s->with_self_gravity) {
+    c->grav.multipole->next = s->multipoles_sub;
+    s->multipoles_sub = c->grav.multipole;
   }
 
   /* Hook this cell into the buffer. */
@@ -2204,8 +3125,8 @@ void space_recycle_list(struct space *s, struct cell *cell_list_begin,
   /* Clean up the list of cells. */
   for (struct cell *c = cell_list_begin; c != NULL; c = c->next) {
     /* Clear the cell. */
-    if (lock_destroy(&c->lock) != 0 || lock_destroy(&c->glock) != 0 ||
-        lock_destroy(&c->mlock) != 0 || lock_destroy(&c->slock) != 0)
+    if (lock_destroy(&c->lock) != 0 || lock_destroy(&c->grav.plock) != 0 ||
+        lock_destroy(&c->mlock) != 0 || lock_destroy(&c->stars.lock) != 0)
       error("Failed to destroy spinlocks.");
 
     /* Count this cell. */
@@ -2221,7 +3142,7 @@ void space_recycle_list(struct space *s, struct cell *cell_list_begin,
   s->tot_cells -= count;
 
   /* Hook the multipoles into the buffer. */
-  if (s->gravity) {
+  if (s->with_self_gravity) {
     multipole_list_end->next = s->multipoles_sub;
     s->multipoles_sub = multipole_list_begin;
   }
@@ -2266,7 +3187,7 @@ void space_getcells(struct space *s, int nr_cells, struct cell **cells) {
     }
 
     /* Is the multipole buffer empty? */
-    if (s->gravity && s->multipoles_sub == NULL) {
+    if (s->with_self_gravity && s->multipoles_sub == NULL) {
       if (posix_memalign(
               (void **)&s->multipoles_sub, multipole_align,
               space_cellallocchunk * sizeof(struct gravity_tensors)) != 0)
@@ -2286,9 +3207,9 @@ void space_getcells(struct space *s, int nr_cells, struct cell **cells) {
     s->tot_cells += 1;
 
     /* Hook the multipole */
-    if (s->gravity) {
-      cells[j]->multipole = s->multipoles_sub;
-      s->multipoles_sub = cells[j]->multipole->next;
+    if (s->with_self_gravity) {
+      cells[j]->grav.multipole = s->multipoles_sub;
+      s->multipoles_sub = cells[j]->grav.multipole->next;
     }
   }
 
@@ -2297,14 +3218,18 @@ void space_getcells(struct space *s, int nr_cells, struct cell **cells) {
 
   /* Init some things in the cell we just got. */
   for (int j = 0; j < nr_cells; j++) {
-    for (int k = 0; k < 13; k++)
-      if (cells[j]->sort[k] != NULL) free(cells[j]->sort[k]);
-    struct gravity_tensors *temp = cells[j]->multipole;
+    for (int k = 0; k < 13; k++) {
+      if (cells[j]->hydro.sort[k] != NULL) free(cells[j]->hydro.sort[k]);
+      if (cells[j]->stars.sort[k] != NULL) free(cells[j]->stars.sort[k]);
+    }
+    struct gravity_tensors *temp = cells[j]->grav.multipole;
     bzero(cells[j], sizeof(struct cell));
-    cells[j]->multipole = temp;
+    cells[j]->grav.multipole = temp;
     cells[j]->nodeID = -1;
-    if (lock_init(&cells[j]->lock) != 0 || lock_init(&cells[j]->glock) != 0 ||
-        lock_init(&cells[j]->mlock) != 0 || lock_init(&cells[j]->slock) != 0)
+    if (lock_init(&cells[j]->hydro.lock) != 0 ||
+        lock_init(&cells[j]->grav.plock) != 0 ||
+        lock_init(&cells[j]->grav.mlock) != 0 ||
+        lock_init(&cells[j]->stars.lock) != 0)
       error("Failed to initialize cell spinlocks.");
   }
 }
@@ -2317,33 +3242,62 @@ void space_getcells(struct space *s, int nr_cells, struct cell **cells) {
 void space_free_buff_sort_indices(struct space *s) {
   for (struct cell *finger = s->cells_sub; finger != NULL;
        finger = finger->next) {
-    for (int k = 0; k < 13; k++)
-      if (finger->sort[k] != NULL) {
-        free(finger->sort[k]);
-        finger->sort[k] = NULL;
+    for (int k = 0; k < 13; k++) {
+      if (finger->hydro.sort[k] != NULL) {
+        free(finger->hydro.sort[k]);
+        finger->hydro.sort[k] = NULL;
       }
+      if (finger->stars.sort[k] != NULL) {
+        free(finger->stars.sort[k]);
+        finger->stars.sort[k] = NULL;
+      }
+    }
   }
 }
 
 /**
  * @brief Construct the list of top-level cells that have any tasks in
- * their hierarchy.
+ * their hierarchy on this MPI rank. Also construct the list of top-level
+ * cells on any rank that have > 0 particles (of any kind).
  *
  * This assumes the list has been pre-allocated at a regrid.
  *
  * @param s The #space.
  */
-void space_list_cells_with_tasks(struct space *s) {
+void space_list_useful_top_level_cells(struct space *s) {
 
-  /* Let's rebuild the list of local top-level cells */
-  s->nr_local_cells = 0;
-  for (int i = 0; i < s->nr_cells; ++i)
-    if (cell_has_tasks(&s->cells_top[i])) {
-      s->local_cells_top[s->nr_local_cells] = i;
-      s->nr_local_cells++;
+  const ticks tic = getticks();
+
+  s->nr_local_cells_with_tasks = 0;
+  s->nr_cells_with_particles = 0;
+
+  for (int i = 0; i < s->nr_cells; ++i) {
+    struct cell *c = &s->cells_top[i];
+
+    if (cell_has_tasks(c)) {
+      s->local_cells_with_tasks_top[s->nr_local_cells_with_tasks] = i;
+      s->nr_local_cells_with_tasks++;
+    }
+
+    const int has_particles =
+        (c->hydro.count > 0) || (c->grav.count > 0) || (c->stars.count > 0) ||
+        (c->grav.multipole != NULL && c->grav.multipole->m_pole.M_000 > 0.f);
+
+    if (has_particles) {
+      s->cells_with_particles_top[s->nr_cells_with_particles] = i;
+      s->nr_cells_with_particles++;
     }
+  }
+  if (s->e->verbose) {
+    message("Have %d local top-level cells with tasks (total=%d)",
+            s->nr_local_cells_with_tasks, s->nr_cells);
+    message("Have %d top-level cells with particles (total=%d)",
+            s->nr_cells_with_particles, s->nr_cells);
+  }
+
   if (s->e->verbose)
-    message("Have %d local cells (total=%d)", s->nr_local_cells, s->nr_cells);
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts,
@@ -2376,7 +3330,7 @@ void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts,
       xp->v_full[2] = gp->v_full[2];
     }
 
-    else if (gp->type == swift_type_star) {
+    else if (gp->type == swift_type_stars) {
 
       /* Get it's stellar friend */
       struct spart *sp = &s->sparts[-gp->id_or_neg_offset];
@@ -2391,11 +3345,17 @@ void space_synchronize_particle_positions_mapper(void *map_data, int nr_gparts,
 
 void space_synchronize_particle_positions(struct space *s) {
 
+  const ticks tic = getticks();
+
   if ((s->nr_gparts > 0 && s->nr_parts > 0) ||
       (s->nr_gparts > 0 && s->nr_sparts > 0))
     threadpool_map(&s->e->threadpool,
                    space_synchronize_particle_positions_mapper, s->gparts,
                    s->nr_gparts, sizeof(struct gpart), 0, (void *)s);
+
+  if (s->e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 }
 
 void space_first_init_parts_mapper(void *restrict map_data, int count,
@@ -2416,11 +3376,28 @@ void space_first_init_parts_mapper(void *restrict map_data, int count,
 
   const struct hydro_props *hydro_props = s->e->hydro_properties;
   const float u_init = hydro_props->initial_internal_energy;
-  const float u_min = hydro_props->minimal_internal_energy;
+  const float hydro_h_min_ratio = e->hydro_properties->h_min_ratio;
+
+  const struct gravity_props *grav_props = s->e->gravity_properties;
+  const int with_gravity = e->policy & engine_policy_self_gravity;
 
   const struct chemistry_global_data *chemistry = e->chemistry;
+  const struct star_formation *star_formation = e->star_formation;
   const struct cooling_function_data *cool_func = e->cooling_func;
 
+  /* Check that the smoothing lengths are non-zero */
+  for (int k = 0; k < count; k++) {
+    if (p[k].h <= 0.)
+      error("Invalid value of smoothing length for part %lld h=%e", p[k].id,
+            p[k].h);
+
+    if (with_gravity) {
+      const struct gpart *gp = p[k].gpart;
+      const float softening = gravity_get_softening(gp, grav_props);
+      p->h = max(p->h, softening * hydro_h_min_ratio);
+    }
+  }
+
   /* Convert velocities to internal units */
   for (int k = 0; k < count; k++) {
     p[k].v[0] *= a_factor_vel;
@@ -2442,17 +3419,27 @@ void space_first_init_parts_mapper(void *restrict map_data, int count,
   for (int k = 0; k < count; k++) {
 
     hydro_first_init_part(&p[k], &xp[k]);
+#ifdef WITH_LOGGER
+    logger_part_data_init(&xp[k].logger_data);
+#endif
 
     /* Overwrite the internal energy? */
     if (u_init > 0.f) hydro_set_init_internal_energy(&p[k], u_init);
-    if (u_min > 0.f) hydro_set_init_internal_energy(&p[k], u_min);
 
     /* Also initialise the chemistry */
     chemistry_first_init_part(phys_const, us, cosmo, chemistry, &p[k], &xp[k]);
 
+    /* Also initialise the star formation */
+    star_formation_first_init_part(phys_const, us, cosmo, star_formation, &p[k],
+                                   &xp[k]);
+
     /* And the cooling */
     cooling_first_init_part(phys_const, us, cosmo, cool_func, &p[k], &xp[k]);
 
+    /* And the tracers */
+    tracers_first_init_xpart(&p[k], &xp[k], us, phys_const, cosmo, hydro_props,
+                             cool_func);
+
 #ifdef SWIFT_DEBUG_CHECKS
     /* Check part->gpart->part linkeage. */
     if (p[k].gpart && p[k].gpart->id_or_neg_offset != -(k + delta))
@@ -2546,12 +3533,17 @@ void space_first_init_sparts_mapper(void *restrict map_data, int count,
 
   struct spart *restrict sp = (struct spart *)map_data;
   const struct space *restrict s = (struct space *)extra_data;
+  const struct engine *e = s->e;
 
 #ifdef SWIFT_DEBUG_CHECKS
   const ptrdiff_t delta = sp - s->sparts;
 #endif
 
-  const struct cosmology *cosmo = s->e->cosmology;
+  const float initial_h = s->initial_spart_h;
+
+  const int with_feedback = (e->policy & engine_policy_feedback);
+
+  const struct cosmology *cosmo = e->cosmology;
   const float a_factor_vel = cosmo->a;
 
   /* Convert velocities to internal units */
@@ -2561,6 +3553,11 @@ void space_first_init_sparts_mapper(void *restrict map_data, int count,
     sp[k].v[1] *= a_factor_vel;
     sp[k].v[2] *= a_factor_vel;
 
+    /* Imposed smoothing length from parameter file */
+    if (initial_h != -1.f) {
+      sp[k].h = initial_h;
+    }
+
 #ifdef HYDRO_DIMENSION_2D
     sp[k].x[2] = 0.f;
     sp[k].v[2] = 0.f;
@@ -2572,10 +3569,17 @@ void space_first_init_sparts_mapper(void *restrict map_data, int count,
 #endif
   }
 
+  /* Check that the smoothing lengths are non-zero */
+  for (int k = 0; k < count; k++) {
+    if (with_feedback && sp[k].h <= 0.)
+      error("Invalid value of smoothing length for spart %lld h=%e", sp[k].id,
+            sp[k].h);
+  }
+
   /* Initialise the rest */
   for (int k = 0; k < count; k++) {
 
-    star_first_init_spart(&sp[k]);
+    stars_first_init_spart(&sp[k]);
 
 #ifdef SWIFT_DEBUG_CHECKS
     if (sp[k].gpart && sp[k].gpart->id_or_neg_offset != -(k + delta))
@@ -2591,7 +3595,7 @@ void space_first_init_sparts_mapper(void *restrict map_data, int count,
 /**
  * @brief Initialises all the s-particles by setting them into a valid state
  *
- * Calls star_first_init_spart() on all the particles
+ * Calls stars_first_init_spart() on all the particles
  */
 void space_first_init_sparts(struct space *s, int verbose) {
   const ticks tic = getticks();
@@ -2656,16 +3660,46 @@ void space_init_gparts(struct space *s, int verbose) {
             clocks_getunit());
 }
 
+void space_init_sparts_mapper(void *restrict map_data, int scount,
+                              void *restrict extra_data) {
+
+  struct spart *restrict sparts = (struct spart *)map_data;
+  for (int k = 0; k < scount; k++) stars_init_spart(&sparts[k]);
+}
+
+/**
+ * @brief Calls the #spart initialisation function on all particles in the
+ * space.
+ *
+ * @param s The #space.
+ * @param verbose Are we talkative?
+ */
+void space_init_sparts(struct space *s, int verbose) {
+
+  const ticks tic = getticks();
+
+  if (s->nr_sparts > 0)
+    threadpool_map(&s->e->threadpool, space_init_sparts_mapper, s->sparts,
+                   s->nr_sparts, sizeof(struct spart), 0, NULL);
+  if (verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+}
+
 void space_convert_quantities_mapper(void *restrict map_data, int count,
                                      void *restrict extra_data) {
   struct space *s = (struct space *)extra_data;
   const struct cosmology *cosmo = s->e->cosmology;
+  const struct hydro_props *hydro_props = s->e->hydro_properties;
   struct part *restrict parts = (struct part *)map_data;
   const ptrdiff_t index = parts - s->parts;
   struct xpart *restrict xparts = s->xparts + index;
 
+  /* Loop over all the particles ignoring the extra buffer ones for on-the-fly
+   * creation */
   for (int k = 0; k < count; k++)
-    hydro_convert_quantities(&parts[k], &xparts[k], cosmo);
+    if (parts[k].time_bin <= num_time_bins)
+      hydro_convert_quantities(&parts[k], &xparts[k], cosmo, hydro_props);
 }
 
 /**
@@ -2697,14 +3731,16 @@ void space_convert_quantities(struct space *s, int verbose) {
  * @param dim Spatial dimensions of the domain.
  * @param parts Array of Gas particles.
  * @param gparts Array of Gravity particles.
- * @param sparts Array of star particles.
+ * @param sparts Array of stars particles.
  * @param Npart The number of Gas particles in the space.
  * @param Ngpart The number of Gravity particles in the space.
- * @param Nspart The number of star particles in the space.
+ * @param Nspart The number of stars particles in the space.
  * @param periodic flag whether the domain is periodic or not.
  * @param replicate How many replications along each direction do we want?
  * @param generate_gas_in_ics Are we generating gas particles from the gparts?
+ * @param hydro flag whether we are doing hydro or not?
  * @param self_gravity flag whether we are doing gravity or not?
+ * @param star_formation flag whether we are doing star formation or not?
  * @param verbose Print messages to stdout or not.
  * @param dry_run If 1, just initialise stuff, don't do anything with the parts.
  *
@@ -2717,8 +3753,9 @@ void space_init(struct space *s, struct swift_params *params,
                 const struct cosmology *cosmo, double dim[3],
                 struct part *parts, struct gpart *gparts, struct spart *sparts,
                 size_t Npart, size_t Ngpart, size_t Nspart, int periodic,
-                int replicate, int generate_gas_in_ics, int self_gravity,
-                int verbose, int dry_run) {
+                int replicate, int generate_gas_in_ics, int hydro,
+                int self_gravity, int star_formation, int verbose,
+                int dry_run) {
 
   /* Clean-up everything */
   bzero(s, sizeof(struct space));
@@ -2728,15 +3765,23 @@ void space_init(struct space *s, struct swift_params *params,
   s->dim[1] = dim[1];
   s->dim[2] = dim[2];
   s->periodic = periodic;
-  s->gravity = self_gravity;
+  s->with_self_gravity = self_gravity;
+  s->with_hydro = hydro;
+  s->with_star_formation = star_formation;
   s->nr_parts = Npart;
-  s->size_parts = Npart;
-  s->parts = parts;
   s->nr_gparts = Ngpart;
-  s->size_gparts = Ngpart;
-  s->gparts = gparts;
   s->nr_sparts = Nspart;
+  s->size_parts = Npart;
+  s->size_gparts = Ngpart;
   s->size_sparts = Nspart;
+  s->nr_inhibited_parts = 0;
+  s->nr_inhibited_gparts = 0;
+  s->nr_inhibited_sparts = 0;
+  s->nr_extra_parts = 0;
+  s->nr_extra_gparts = 0;
+  s->nr_extra_sparts = 0;
+  s->parts = parts;
+  s->gparts = gparts;
   s->sparts = sparts;
   s->min_part_mass = FLT_MAX;
   s->min_gpart_mass = FLT_MAX;
@@ -2748,7 +3793,7 @@ void space_init(struct space *s, struct swift_params *params,
 
   /* Are we generating gas from the DM-only ICs? */
   if (generate_gas_in_ics) {
-    space_generate_gas(s, cosmo, verbose);
+    space_generate_gas(s, cosmo, periodic, dim, verbose);
     parts = s->parts;
     gparts = s->gparts;
     Npart = s->nr_parts;
@@ -2810,13 +3855,20 @@ void space_init(struct space *s, struct swift_params *params,
                                space_subsize_self_grav_default);
   space_splitsize = parser_get_opt_param_int(
       params, "Scheduler:cell_split_size", space_splitsize_default);
-  space_subdepth_grav = parser_get_opt_param_int(
-      params, "Scheduler:cell_subdepth_grav", space_subdepth_grav_default);
+  space_subdepth_diff_grav =
+      parser_get_opt_param_int(params, "Scheduler:cell_subdepth_diff_grav",
+                               space_subdepth_diff_grav_default);
+  space_extra_parts = parser_get_opt_param_int(
+      params, "Scheduler:cell_extra_parts", space_extra_parts_default);
+  space_extra_sparts = parser_get_opt_param_int(
+      params, "Scheduler:cell_extra_sparts", space_extra_sparts_default);
+  space_extra_gparts = parser_get_opt_param_int(
+      params, "Scheduler:cell_extra_gparts", space_extra_gparts_default);
 
   if (verbose) {
     message("max_size set to %d split_size set to %d", space_maxsize,
             space_splitsize);
-    message("subdepth_grav set to %d", space_subdepth_grav);
+    message("subdepth_grav set to %d", space_subdepth_diff_grav);
     message("sub_size_pair_hydro set to %d, sub_size_self_hydro set to %d",
             space_subsize_pair_hydro, space_subsize_self_hydro);
     message("sub_size_pair_grav set to %d, sub_size_self_grav set to %d",
@@ -2831,6 +3883,13 @@ void space_init(struct space *s, struct swift_params *params,
     for (size_t k = 0; k < Npart; k++) parts[k].h *= scaling;
   }
 
+  /* Read in imposed star smoothing length */
+  s->initial_spart_h = parser_get_opt_param_float(
+      params, "InitialConditions:stars_smoothing_length", -1.f);
+  if (s->initial_spart_h != -1.f) {
+    message("Imposing a star smoothing length of %e", s->initial_spart_h);
+  }
+
   /* Apply shift */
   double shift[3] = {0.0, 0.0, 0.0};
   parser_get_opt_param_double_array(params, "InitialConditions:shift", 3,
@@ -2913,6 +3972,13 @@ void space_init(struct space *s, struct swift_params *params,
   /* Init the space lock. */
   if (lock_init(&s->lock) != 0) error("Failed to create space spin-lock.");
 
+#ifdef SWIFT_DEBUG_CHECKS
+  last_cell_id = 1;
+#endif
+
+  /* Do we want any spare particles for on-the-fly creation? */
+  if (!star_formation) space_extra_sparts = 0;
+
   /* Build the cells recursively. */
   if (!dry_run) space_regrid(s, verbose);
 }
@@ -3042,8 +4108,30 @@ void space_replicate(struct space *s, int replicate, int verbose) {
 #endif
 }
 
+/**
+ * @brief Duplicate all the dark matter particles to create the same number
+ * of gas particles with mass ratios given by the cosmology.
+ *
+ * Note that this function alters the dark matter particle masses and positions.
+ * Velocities are unchanged. We also leave the thermodynamic properties of the
+ * gas un-initialised as they will be given a value from the parameter file at a
+ * later stage.
+ *
+ * @param s The #space to create the particles in.
+ * @param cosmo The current #cosmology model.
+ * @param periodic Are we using periodic boundary conditions?
+ * @param dim The size of the box (for periodic wrapping).
+ * @param verbose Are we talkative?
+ */
 void space_generate_gas(struct space *s, const struct cosmology *cosmo,
-                        int verbose) {
+                        int periodic, const double dim[3], int verbose) {
+
+  /* Check that this is a sensible thing to do */
+  if (!s->with_hydro)
+    error(
+        "Cannot generate gas from ICs if we are running without "
+        "hydrodynamics. Need to run with -s and the corresponding "
+        "hydrodynamics parameters in the YAML file.");
 
   if (verbose) message("Generating gas particles from gparts");
 
@@ -3080,7 +4168,7 @@ void space_generate_gas(struct space *s, const struct cosmology *cosmo,
 
   /* Compute some constants */
   const double mass_ratio = cosmo->Omega_b / cosmo->Omega_m;
-  const double bg_density = cosmo->Omega_m * cosmo->critical_density;
+  const double bg_density = cosmo->Omega_m * cosmo->critical_density_0;
   const double bg_density_inv = 1. / bg_density;
 
   /* Update the particle properties */
@@ -3094,9 +4182,11 @@ void space_generate_gas(struct space *s, const struct cosmology *cosmo,
     p->id = gp_gas->id_or_neg_offset * 2 + 1;
     gp_dm->id_or_neg_offset *= 2;
 
-    if (gp_dm->id_or_neg_offset <= 0) error("DM particle ID overflowd");
+    if (gp_dm->id_or_neg_offset < 0)
+      error("DM particle ID overflowd (DM id=%lld gas id=%lld)",
+            gp_dm->id_or_neg_offset, p->id);
 
-    if (p->id <= 0) error("gas particle ID overflowd");
+    if (p->id < 0) error("gas particle ID overflowd (id=%lld)", p->id);
 
     /* Set the links correctly */
     p->gpart = gp_gas;
@@ -3105,8 +4195,8 @@ void space_generate_gas(struct space *s, const struct cosmology *cosmo,
 
     /* Compute positions shift */
     const double d = cbrt(gp_dm->mass * bg_density_inv);
-    const double shift_dm = d * mass_ratio;
-    const double shift_gas = d * (1. - mass_ratio);
+    const double shift_dm = 0.5 * d * mass_ratio;
+    const double shift_gas = 0.5 * d * (1. - mass_ratio);
 
     /* Set the masses */
     gp_dm->mass *= (1. - mass_ratio);
@@ -3117,20 +4207,37 @@ void space_generate_gas(struct space *s, const struct cosmology *cosmo,
     gp_dm->x[0] += shift_dm;
     gp_dm->x[1] += shift_dm;
     gp_dm->x[2] += shift_dm;
-    gp_gas->x[0] += shift_gas;
-    gp_gas->x[1] += shift_gas;
-    gp_gas->x[2] += shift_gas;
+    gp_gas->x[0] -= shift_gas;
+    gp_gas->x[1] -= shift_gas;
+    gp_gas->x[2] -= shift_gas;
+
+    /* Make sure the positions are identical between linked particles */
     p->x[0] = gp_gas->x[0];
     p->x[1] = gp_gas->x[1];
     p->x[2] = gp_gas->x[2];
 
+    /* Box-wrap the whole thing to be safe */
+    if (periodic) {
+      gp_dm->x[0] = box_wrap(gp_dm->x[0], 0., dim[0]);
+      gp_dm->x[1] = box_wrap(gp_dm->x[1], 0., dim[1]);
+      gp_dm->x[2] = box_wrap(gp_dm->x[2], 0., dim[2]);
+      gp_gas->x[0] = box_wrap(gp_gas->x[0], 0., dim[0]);
+      gp_gas->x[1] = box_wrap(gp_gas->x[1], 0., dim[1]);
+      gp_gas->x[2] = box_wrap(gp_gas->x[2], 0., dim[2]);
+      p->x[0] = box_wrap(p->x[0], 0., dim[0]);
+      p->x[1] = box_wrap(p->x[1], 0., dim[1]);
+      p->x[2] = box_wrap(p->x[2], 0., dim[2]);
+    }
+
     /* Also copy the velocities */
     p->v[0] = gp_gas->v_full[0];
     p->v[1] = gp_gas->v_full[1];
     p->v[2] = gp_gas->v_full[2];
 
     /* Set the smoothing length to the mean inter-particle separation */
-    p->h = 30. * d;
+    p->h = d;
+
+    /* Note that the thermodynamic properties (u, S, ...) will be set later */
   }
 
   /* Replace the content of the space */
@@ -3218,6 +4325,7 @@ void space_check_drift_point(struct space *s, integertime_t ti_drift,
   /* Recursively check all cells */
   space_map_cells_pre(s, 1, cell_check_part_drift_point, &ti_drift);
   space_map_cells_pre(s, 1, cell_check_gpart_drift_point, &ti_drift);
+  space_map_cells_pre(s, 1, cell_check_spart_drift_point, &ti_drift);
   if (multipole)
     space_map_cells_pre(s, 1, cell_check_multipole_drift_point, &ti_drift);
 #else
@@ -3253,6 +4361,49 @@ void space_check_timesteps(struct space *s) {
 #endif
 }
 
+/**
+ * @brief #threadpool mapper function for the limiter debugging check
+ */
+void space_check_limiter_mapper(void *map_data, int nr_parts,
+                                void *extra_data) {
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Unpack the data */
+  struct part *restrict parts = (struct part *)map_data;
+
+  /* Verify that all limited particles have been treated */
+  for (int k = 0; k < nr_parts; k++) {
+
+    if (parts[k].time_bin == time_bin_inhibited) continue;
+
+    if (parts[k].wakeup == time_bin_awake)
+      error("Particle still woken up! id=%lld", parts[k].id);
+
+    if (parts[k].gpart != NULL)
+      if (parts[k].time_bin != parts[k].gpart->time_bin)
+        error("Gpart not on the same time-bin as part");
+  }
+#else
+  error("Calling debugging code without debugging flag activated.");
+#endif
+}
+
+/**
+ * @brief Checks that all particles have their wakeup flag in a correct state.
+ *
+ * Should only be used for debugging purposes.
+ *
+ * @param s The #space to check.
+ */
+void space_check_limiter(struct space *s) {
+#ifdef SWIFT_DEBUG_CHECKS
+
+  threadpool_map(&s->e->threadpool, space_check_limiter_mapper, s->parts,
+                 s->nr_parts, sizeof(struct part), 1000, NULL);
+#else
+  error("Calling debugging code without debugging flag activated.");
+#endif
+}
+
 /**
  * @brief Resets all the individual cell task counters to 0.
  *
@@ -3279,6 +4430,9 @@ void space_clean(struct space *s) {
   free(s->cells_top);
   free(s->multipoles_top);
   free(s->local_cells_top);
+  free(s->local_cells_with_tasks_top);
+  free(s->cells_with_particles_top);
+  free(s->local_cells_with_particles_top);
   free(s->parts);
   free(s->xparts);
   free(s->gparts);
@@ -3330,7 +4484,11 @@ void space_struct_restore(struct space *s, FILE *stream) {
   s->multipoles_top = NULL;
   s->multipoles_sub = NULL;
   s->local_cells_top = NULL;
-  s->grav_top_level = NULL;
+  s->local_cells_with_tasks_top = NULL;
+  s->cells_with_particles_top = NULL;
+  s->local_cells_with_particles_top = NULL;
+  s->nr_local_cells_with_tasks = 0;
+  s->nr_cells_with_particles = 0;
 #ifdef WITH_MPI
   s->parts_foreign = NULL;
   s->size_parts_foreign = 0;
@@ -3383,7 +4541,7 @@ void space_struct_restore(struct space *s, FILE *stream) {
                         NULL, "sparts");
   }
 
-  /* Need to reconnect the gravity parts to their hydro and star particles. */
+  /* Need to reconnect the gravity parts to their hydro and stars particles. */
   /* Re-link the parts. */
   if (s->nr_parts > 0 && s->nr_gparts > 0)
     part_relink_parts_to_gparts(s->gparts, s->nr_gparts, s->parts);
diff --git a/src/space.h b/src/space.h
index e3173ece1e2749a3afb8072b179150587a100a82..fe47a2b8b995e5872e79e755b5b8075a409795b8 100644
--- a/src/space.h
+++ b/src/space.h
@@ -35,6 +35,7 @@
 #include "lock.h"
 #include "parser.h"
 #include "part.h"
+#include "velociraptor_struct.h"
 
 /* Avoid cyclic inclusions */
 struct cell;
@@ -44,11 +45,15 @@ struct cosmology;
 #define space_cellallocchunk 1000
 #define space_splitsize_default 400
 #define space_maxsize_default 8000000
+#define space_extra_parts_default 0
+#define space_extra_gparts_default 0
+#define space_extra_sparts_default 100
+#define space_expected_max_nr_strays_default 100
 #define space_subsize_pair_hydro_default 256000000
 #define space_subsize_self_hydro_default 32000
 #define space_subsize_pair_grav_default 256000000
 #define space_subsize_self_grav_default 32000
-#define space_subdepth_grav_default 2
+#define space_subdepth_diff_grav_default 4
 #define space_max_top_level_cells_default 12
 #define space_stretch 1.10f
 #define space_maxreldx 0.1f
@@ -63,7 +68,12 @@ extern int space_subsize_pair_hydro;
 extern int space_subsize_self_hydro;
 extern int space_subsize_pair_grav;
 extern int space_subsize_self_grav;
-extern int space_subdepth_grav;
+extern int space_subsize_pair_stars;
+extern int space_subsize_self_stars;
+extern int space_subdepth_diff_grav;
+extern int space_extra_parts;
+extern int space_extra_gparts;
+extern int space_extra_sparts;
 
 /**
  * @brief The space in which the cells and particles reside.
@@ -79,8 +89,14 @@ struct space {
   /*! Extra space information needed for some hydro schemes. */
   struct hydro_space hs;
 
+  /*! Are we doing hydrodynamics? */
+  int with_hydro;
+
   /*! Are we doing gravity? */
-  int gravity;
+  int with_self_gravity;
+
+  /*! Are we doing star formation? */
+  int with_star_formation;
 
   /*! Width of the top-level cells. */
   double width[3];
@@ -106,9 +122,18 @@ struct space {
   /*! Total number of cells (top- and sub-) */
   int tot_cells;
 
-  /*! Number of *local* top-level cells with tasks */
+  /*! Number of *local* top-level cells */
   int nr_local_cells;
 
+  /*! Number of *local* top-level cells with tasks */
+  int nr_local_cells_with_tasks;
+
+  /*! Number of top-level cells that have >0 particle (of any kind) */
+  int nr_cells_with_particles;
+
+  /*! Number of *local* top-level cells that have >0 particle (of any kind) */
+  int nr_local_cells_with_particles;
+
   /*! The (level 0) cells themselves. */
   struct cell *cells_top;
 
@@ -121,17 +146,53 @@ struct space {
   /*! Buffer of unused multipoles for the sub-cells. */
   struct gravity_tensors *multipoles_sub;
 
-  /*! The indices of the *local* top-level cells with tasks */
+  /*! The indices of the *local* top-level cells */
   int *local_cells_top;
 
-  /*! The total number of parts in the space. */
-  size_t nr_parts, size_parts;
+  /*! The indices of the *local* top-level cells with tasks */
+  int *local_cells_with_tasks_top;
+
+  /*! The indices of the top-level cells that have >0 particles (of any kind) */
+  int *cells_with_particles_top;
+
+  /*! The indices of the top-level cells that have >0 particles (of any kind) */
+  int *local_cells_with_particles_top;
+
+  /*! The total number of #part in the space. */
+  size_t nr_parts;
 
-  /*! The total number of g-parts in the space. */
-  size_t nr_gparts, size_gparts;
+  /*! The total number of #gpart in the space. */
+  size_t nr_gparts;
 
-  /*! The total number of g-parts in the space. */
-  size_t nr_sparts, size_sparts;
+  /*! The total number of #spart in the space. */
+  size_t nr_sparts;
+
+  /*! The total number of #part we allocated memory for */
+  size_t size_parts;
+
+  /*! The total number of #gpart we allocated memory for */
+  size_t size_gparts;
+
+  /*! The total number of #spart we allocated memory for */
+  size_t size_sparts;
+
+  /*! Number of inhibited gas particles in the space */
+  size_t nr_inhibited_parts;
+
+  /*! Number of inhibited gravity particles in the space */
+  size_t nr_inhibited_gparts;
+
+  /*! Number of inhibited star particles in the space */
+  size_t nr_inhibited_sparts;
+
+  /*! Number of extra #part we allocated (for on-the-fly creation) */
+  size_t nr_extra_parts;
+
+  /*! Number of extra #gpart we allocated (for on-the-fly creation) */
+  size_t nr_extra_gparts;
+
+  /*! Number of extra #spart we allocated (for on-the-fly creation) */
+  size_t nr_extra_sparts;
 
   /*! The particle data (cells have pointers to this). */
   struct part *parts;
@@ -145,9 +206,6 @@ struct space {
   /*! The s-particle data (cells have pointers to this). */
   struct spart *sparts;
 
-  /*! The top-level FFT task */
-  struct task *grav_top_level;
-
   /*! Minimal mass of all the #part */
   float min_part_mass;
 
@@ -166,6 +224,9 @@ struct space {
   /*! Sum of the norm of the velocity of all the #spart */
   float sum_spart_vel_norm;
 
+  /*! Initial value of the smoothing length read from the parameter file */
+  float initial_spart_h;
+
   /*! General-purpose lock for this space. */
   swift_lock_type lock;
 
@@ -175,6 +236,9 @@ struct space {
   /*! The associated engine. */
   struct engine *e;
 
+  /*! The group information returned by VELOCIraptor for each #gpart. */
+  struct velociraptor_gpart_data *gpart_group_data;
+
 #ifdef WITH_MPI
 
   /*! Buffers for parts that we will receive from foreign cells. */
@@ -192,7 +256,7 @@ struct space {
 #endif
 };
 
-/* function prototypes. */
+/* Function prototypes. */
 void space_free_buff_sort_indices(struct space *s);
 void space_parts_sort(struct part *parts, struct xpart *xparts, int *ind,
                       int *counts, int num_bins, ptrdiff_t parts_offset);
@@ -206,8 +270,8 @@ void space_init(struct space *s, struct swift_params *params,
                 const struct cosmology *cosmo, double dim[3],
                 struct part *parts, struct gpart *gparts, struct spart *sparts,
                 size_t Npart, size_t Ngpart, size_t Nspart, int periodic,
-                int replicate, int generate_gas_in_ics, int gravity,
-                int verbose, int dry_run);
+                int replicate, int generate_gas_in_ics, int hydro, int gravity,
+                int star_formation, int verbose, int dry_run);
 void space_sanitize(struct space *s);
 void space_map_cells_pre(struct space *s, int full,
                          void (*fun)(struct cell *c, void *data), void *data);
@@ -219,22 +283,25 @@ void space_map_parts_xparts(struct space *s,
                                         struct cell *c));
 void space_map_cells_post(struct space *s, int full,
                           void (*fun)(struct cell *c, void *data), void *data);
-void space_rebuild(struct space *s, int verbose);
+void space_rebuild(struct space *s, int repartitioned, int verbose);
 void space_recycle(struct space *s, struct cell *c);
 void space_recycle_list(struct space *s, struct cell *cell_list_begin,
                         struct cell *cell_list_end,
                         struct gravity_tensors *multipole_list_begin,
                         struct gravity_tensors *multipole_list_end);
-void space_split(struct space *s, struct cell *cells, int nr_cells,
-                 int verbose);
+void space_split(struct space *s, int verbose);
+void space_reorder_extras(struct space *s, int verbose);
 void space_split_mapper(void *map_data, int num_elements, void *extra_data);
-void space_list_cells_with_tasks(struct space *s);
+void space_list_useful_top_level_cells(struct space *s);
 void space_parts_get_cell_index(struct space *s, int *ind, int *cell_counts,
-                                struct cell *cells, int verbose);
+                                size_t *count_inhibited_parts,
+                                size_t *count_extra_parts, int verbose);
 void space_gparts_get_cell_index(struct space *s, int *gind, int *cell_counts,
-                                 struct cell *cells, int verbose);
+                                 size_t *count_inhibited_gparts,
+                                 size_t *count_extra_gparts, int verbose);
 void space_sparts_get_cell_index(struct space *s, int *sind, int *cell_counts,
-                                 struct cell *cells, int verbose);
+                                 size_t *count_inhibited_sparts,
+                                 size_t *count_extra_sparts, int verbose);
 void space_synchronize_particle_positions(struct space *s);
 void space_do_parts_sort(void);
 void space_do_gparts_sort(void);
@@ -244,6 +311,7 @@ void space_first_init_gparts(struct space *s, int verbose);
 void space_first_init_sparts(struct space *s, int verbose);
 void space_init_parts(struct space *s, int verbose);
 void space_init_gparts(struct space *s, int verbose);
+void space_init_sparts(struct space *s, int verbose);
 void space_convert_quantities(struct space *s, int verbose);
 void space_link_cleanup(struct space *s);
 void space_check_drift_point(struct space *s, integertime_t ti_drift,
@@ -251,9 +319,10 @@ void space_check_drift_point(struct space *s, integertime_t ti_drift,
 void space_check_top_multipoles_drift_point(struct space *s,
                                             integertime_t ti_drift);
 void space_check_timesteps(struct space *s);
+void space_check_limiter(struct space *s);
 void space_replicate(struct space *s, int replicate, int verbose);
 void space_generate_gas(struct space *s, const struct cosmology *cosmo,
-                        int verbose);
+                        int periodic, const double dim[3], int verbose);
 void space_check_cosmology(struct space *s, const struct cosmology *cosmo,
                            int rank);
 void space_reset_task_counters(struct space *s);
diff --git a/src/star_formation.c b/src/star_formation.c
new file mode 100644
index 0000000000000000000000000000000000000000..698a64cc636dd79f00feac3f6cc88bf519fe09c1
--- /dev/null
+++ b/src/star_formation.c
@@ -0,0 +1,83 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* This object's header. */
+#include "part.h"
+#include "restart.h"
+#include "star_formation.h"
+#include "units.h"
+
+/**
+ * @brief  Initialises the star formation law properties in the internal
+ * unit system.
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us the current internal system of units
+ * @param hydro_props The properties of the hydro scheme.
+ * @param starform the properties of the star formation law
+ */
+void starformation_init(struct swift_params* parameter_file,
+                        const struct phys_const* phys_const,
+                        const struct unit_system* us,
+                        const struct hydro_props* hydro_props,
+                        struct star_formation* starform) {
+
+  starformation_init_backend(parameter_file, phys_const, us, hydro_props,
+                             starform);
+}
+
+/**
+ * @brief Print the properties of the star formation law
+ *
+ * @param starform the star formation properties.
+ */
+void starformation_print(const struct star_formation* starform) {
+
+  starformation_print_backend(starform);
+}
+
+/**
+ * @brief Write a star_formation struct to the given FILE as a stream of
+ * bytes.
+ *
+ * @param starform the star formation struct
+ * @param stream the file stream
+ */
+void starformation_struct_dump(const struct star_formation* starform,
+                               FILE* stream) {
+  restart_write_blocks((void*)starform, sizeof(struct star_formation), 1,
+                       stream, "starformation", "star formation");
+}
+
+/**
+ * @brief Restore a star_formation struct from the given FILE as a stream of
+ * bytes.
+ *
+ * @param starform the star formation struct
+ * @param stream the file stream
+ */
+void starformation_struct_restore(const struct star_formation* starform,
+                                  FILE* stream) {
+  restart_read_blocks((void*)starform, sizeof(struct star_formation), 1, stream,
+                      NULL, "star formation");
+}
diff --git a/src/star_formation.h b/src/star_formation.h
new file mode 100644
index 0000000000000000000000000000000000000000..5f873d2da142ec6fda90e986b523f60f7ef0d4ef
--- /dev/null
+++ b/src/star_formation.h
@@ -0,0 +1,57 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_H
+#define SWIFT_STAR_FORMATION_H
+
+/**
+ * @file src/star_formation.h
+ * @brief Branches between the different star formation recipes.
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right star formation law definition */
+#if defined(STAR_FORMATION_NONE)
+#include "./star_formation/none/star_formation.h"
+#elif defined(STAR_FORMATION_EAGLE)
+#include "./star_formation/EAGLE/star_formation.h"
+#elif defined(STAR_FORMATION_GEAR)
+#include "./star_formation/GEAR/star_formation.h"
+#else
+#error "Invalid choice of star formation law"
+#endif
+
+/* General functions defined in the source file */
+void starformation_init(struct swift_params* parameter_file,
+                        const struct phys_const* phys_const,
+                        const struct unit_system* us,
+                        const struct hydro_props* hydro_props,
+                        struct star_formation* starform);
+
+void starformation_print(const struct star_formation* starform);
+
+/* Dump store */
+void starformation_struct_dump(const struct star_formation* starform,
+                               FILE* stream);
+
+void starformation_struct_restore(const struct star_formation* starform,
+                                  FILE* stream);
+
+#endif /* SWIFT_STAR_FORMATION_H */
diff --git a/src/star_formation/EAGLE/star_formation.h b/src/star_formation/EAGLE/star_formation.h
new file mode 100644
index 0000000000000000000000000000000000000000..b72bb38babaca51b3875147d04c46f2de95de1a7
--- /dev/null
+++ b/src/star_formation/EAGLE/star_formation.h
@@ -0,0 +1,694 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *******************************************************************************/
+#ifndef SWIFT_EAGLE_STAR_FORMATION_H
+#define SWIFT_EAGLE_STAR_FORMATION_H
+
+/* Local includes */
+#include "adiabatic_index.h"
+#include "cooling.h"
+#include "cosmology.h"
+#include "engine.h"
+#include "equation_of_state.h"
+#include "hydro.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "random.h"
+#include "stars.h"
+#include "units.h"
+
+/**
+ * @file src/star_formation/EAGLE/star_formation.h
+ * @brief Star formation model used in the EAGLE model
+ */
+
+/**
+ * @brief Properties of the EAGLE star formation model.
+ */
+struct star_formation {
+
+  /*! Normalization of the KS star formation law (internal units) */
+  double KS_normalization;
+
+  /*! Normalization of the KS star formation law (Msun / kpc^2 / yr) */
+  double KS_normalization_MSUNpYRpKPC2;
+
+  /*! Slope of the KS law */
+  double KS_power_law;
+
+  /*! Slope of the high density KS law */
+  double KS_high_den_power_law;
+
+  /*! KS law High density threshold (internal units) */
+  double KS_high_den_thresh;
+
+  /*! KS high density normalization (internal units) */
+  double KS_high_den_normalization;
+
+  /*! KS high density normalization (H atoms per cm^3)  */
+  double KS_high_den_thresh_HpCM3;
+
+  /*! Critical overdensity */
+  double min_over_den;
+
+  /*! Dalla Vecchia & Schaye temperature criteria */
+  double temperature_margin_threshold_dex;
+
+  /*! 10^Tdex of Dalla Vecchia & Schaye temperature criteria */
+  double ten_to_temperature_margin_threshold_dex;
+
+  /*! gas fraction */
+  double fgas;
+
+  /*! Star formation law slope */
+  double SF_power_law;
+
+  /*! star formation normalization (internal units) */
+  double SF_normalization;
+
+  /*! star formation high density slope */
+  double SF_high_den_power_law;
+
+  /*! Star formation high density normalization (internal units) */
+  double SF_high_den_normalization;
+
+  /*! Density threshold to form stars (internal units) */
+  double density_threshold;
+
+  /*! Density threshold to form stars in user units */
+  double density_threshold_HpCM3;
+
+  /*! Maximum density threshold to form stars (internal units) */
+  double density_threshold_max;
+
+  /*! Maximum density threshold to form stars (H atoms per cm^3) */
+  double density_threshold_max_HpCM3;
+
+  /*! Reference metallicity for metal-dependant threshold */
+  double Z0;
+
+  /*! Inverse of reference metallicity */
+  double Z0_inv;
+
+  /*! critical density Metallicity power law (internal units) */
+  double n_Z0;
+
+  /*! Polytropic index */
+  double EOS_polytropic_index;
+
+  /*! EOS density norm (H atoms per cm^3) */
+  double EOS_density_norm_HpCM3;
+
+  /*! EOS Temperature norm (Kelvin)  */
+  double EOS_temperature_norm_K;
+
+  /*! EOS pressure norm, eq. 13 of Schaye & Dalla Vecchia 2008 (internal units)
+   */
+  double EOS_pressure_c;
+
+  /*! EOS Temperature norm, eq. 13 of Schaye & Dalla Vecchia 2008 (internal
+   * units) */
+  double EOS_temperature_c;
+
+  /*! EOS density norm, eq. 13 of Schaye & Dalla Vecchia 2008 (internal units)
+   */
+  double EOS_density_c;
+
+  /*! Inverse of EOS density norm (internal units) */
+  double EOS_density_c_inv;
+
+  /*! Max physical density (H atoms per cm^3)*/
+  double max_gas_density_HpCM3;
+
+  /*! Max physical density (internal units) */
+  double max_gas_density;
+};
+
+/**
+ * @brief Computes the density threshold for star-formation for a given total
+ * metallicity.
+ *
+ * Follows Schaye (2004) eq. 19 and 24 (see also Schaye et al. 2015, eq. 2).
+ *
+ * @param Z The metallicity (metal mass fraction).
+ * @param starform The properties of the star formation model.
+ * @param phys_const The physical constants.
+ * @return The physical density threshold for star formation in internal units.
+ */
+INLINE static double star_formation_threshold(
+    const double Z, const struct star_formation* starform,
+    const struct phys_const* phys_const) {
+
+  double density_threshold;
+
+  /* Schaye (2004), eq. 19 and 24 */
+  if (Z > 0.) {
+    density_threshold = starform->density_threshold *
+                        powf(Z * starform->Z0_inv, starform->n_Z0);
+    density_threshold = min(density_threshold, starform->density_threshold_max);
+  } else {
+    density_threshold = starform->density_threshold_max;
+  }
+
+  /* Convert to mass density */
+  return density_threshold * phys_const->const_proton_mass;
+}
+
+/**
+ * @brief Compute the pressure on the polytropic equation of state for a given
+ * Hydrogen number density.
+ *
+ * Schaye & Dalla Vecchia 2008, eq. 13.
+ *
+ * @param n_H The Hydrogen number density in internal units.
+ * @param starform The properties of the star formation model.
+ * @return The pressure on the equation of state in internal units.
+ */
+INLINE static double EOS_pressure(const double n_H,
+                                  const struct star_formation* starform) {
+
+  return starform->EOS_pressure_c *
+         pow(n_H * starform->EOS_density_c_inv, starform->EOS_polytropic_index);
+}
+
+/**
+ * @brief Compute the temperature on the polytropic equation of state for a given
+ * Hydrogen number density.
+ *
+ * Schaye & Dalla Vecchia 2008, eq. 13 rewritten for temperature
+ *
+ * @param n_H The Hydrogen number density in internal units.
+ * @param starform The properties of the star formation model.
+ * @return The temperature on the equation of state in internal units.
+ */
+INLINE static double EOS_temperature(const double n_H,
+                                     const struct star_formation* starform) {
+
+  return starform->EOS_temperature_c *
+         pow(n_H, starform->EOS_polytropic_index - 1.);
+}
+
+/**
+ * @brief Calculate if the gas has the potential of becoming
+ * a star.
+ *
+ * @param starform the star formation law properties to use.
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cooling The cooling data struct.
+ *
+ */
+INLINE static int star_formation_is_star_forming(
+    const struct part* restrict p, const struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cooling_function_data* restrict cooling) {
+
+  /* Minimal density (converted from critical density) for star formation */
+  const double rho_crit_times_min_over_den =
+      cosmo->critical_density * starform->min_over_den;
+
+  /* Physical density of the particle */
+  const double physical_density = hydro_get_physical_density(p, cosmo);
+
+  /* Decide whether we should form stars or not,
+   * first we determine if we have the correct over density
+   * if that is true we calculate if either the maximum density
+   * threshold is reached or if the metallicity dependent
+   * threshold is reached, after this we calculate if the
+   * temperature is appropriate */
+  if (physical_density < rho_crit_times_min_over_den) return 0;
+
+  /* In this case there are actually multiple possibilities
+   * because we also need to check if the physical density exceeded
+   * the appropriate limit */
+
+  const double Z = p->chemistry_data.smoothed_metal_mass_fraction_total;
+  const double X_H = p->chemistry_data.smoothed_metal_mass_fraction[0];
+  const double n_H = physical_density * X_H;
+
+  /* Get the density threshold */
+  const double density_threshold =
+      star_formation_threshold(Z, starform, phys_const);
+
+  /* Check if it exceeded the minimum density */
+  if (n_H < density_threshold) return 0;
+
+  /* Calculate the temperature */
+  const double temperature = cooling_get_temperature(phys_const, hydro_props,
+                                                     us, cosmo, cooling, p, xp);
+
+  /* Temperature on the equation of state */
+  const double temperature_eos = EOS_temperature(n_H, starform);
+
+  /* Check the Schaye & Dalla Vecchia 2012 EOS-based temperature criterion */
+  return (temperature <
+          temperature_eos * starform->ten_to_temperature_margin_threshold_dex);
+}
+
+/**
+ * @brief Compute the star-formation rate of a given particle and store
+ * it into the #xpart.
+ *
+ * @param p #part.
+ * @param xp the #xpart.
+ * @param starform the star formation law properties to use
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param dt_star The time-step of this particle.
+ */
+INLINE static void star_formation_compute_SFR(
+    const struct part* restrict p, struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo, const double dt_star) {
+
+  /* Abort early if time-step size is 0 */
+  if (dt_star == 0.) {
+
+    xp->sf_data.SFR = 0.f;
+    return;
+  }
+
+  /* Hydrogen number density of this particle */
+  const double physical_density = hydro_get_physical_density(p, cosmo);
+  const double X_H = p->chemistry_data.smoothed_metal_mass_fraction[0];
+  const double n_H = physical_density * X_H / phys_const->const_proton_mass;
+
+  /* Are we above the threshold for automatic star formation? */
+  if (physical_density >
+      starform->max_gas_density * phys_const->const_proton_mass) {
+
+    xp->sf_data.SFR = hydro_get_mass(p) / dt_star;
+    return;
+  }
+
+  /* Pressure on the effective EOS for this particle */
+  const double pressure = EOS_pressure(n_H, starform);
+
+  /* Calculate the specific star formation rate */
+  double SFRpergasmass;
+  if (hydro_get_physical_density(p, cosmo) <
+      starform->KS_high_den_thresh * phys_const->const_proton_mass) {
+
+    SFRpergasmass =
+        starform->SF_normalization * pow(pressure, starform->SF_power_law);
+
+  } else {
+
+    SFRpergasmass = starform->SF_high_den_normalization *
+                    pow(pressure, starform->SF_high_den_power_law);
+  }
+
+  /* Store the SFR */
+  xp->sf_data.SFR = SFRpergasmass * hydro_get_mass(p);
+}
+
+/**
+ * @brief Decides whether a particle should be converted into a
+ * star or not.
+ *
+ * Equation 21 of Schaye & Dalla Vecchia 2008.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param starform The properties of the star formation model.
+ * @param e The #engine (for random numbers).
+ * @param dt_star The time-step of this particle
+ * @return 1 if a conversion should be done, 0 otherwise.
+ */
+INLINE static int star_formation_should_convert_to_star(
+    const struct part* p, const struct xpart* xp,
+    const struct star_formation* starform, const struct engine* e,
+    const double dt_star) {
+
+  /* Calculate the probability of forming a star */
+  const double prob = xp->sf_data.SFR * dt_star / hydro_get_mass(p);
+
+  /* Get a unique random number between 0 and 1 for star formation */
+  const double random_number =
+      random_unit_interval(p->id, e->ti_current, random_number_star_formation);
+
+  /* Have we been lucky and need to form a star? */
+  return (prob > random_number);
+}
+
+/**
+ * @brief Update the SF properties of a particle that is not star forming.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param e The #engine.
+ * @param starform The properties of the star formation model.
+ * @param with_cosmology Are we running with cosmology switched on?
+ */
+INLINE static void star_formation_update_part_not_SFR(
+    struct part* p, struct xpart* xp, const struct engine* e,
+    const struct star_formation* starform, const int with_cosmology) {
+
+  /* Check if it is the first time steps after star formation */
+  if (xp->sf_data.SFR > 0.f) {
+
+    /* Record the current time as an indicator of when this particle was last
+       star-forming. */
+    if (with_cosmology) {
+      xp->sf_data.SFR = -e->cosmology->a;
+    } else {
+      xp->sf_data.SFR = -e->time;
+    }
+  }
+}
+
+/**
+ * @brief Copies the properties of the gas particle over to the
+ * star particle
+ *
+ * @param e The #engine
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param sp the new created star particle with its properties.
+ * @param starform the star formation law properties to use.
+ * @param cosmo the cosmological parameters and properties.
+ * @param with_cosmology if we run with cosmology.
+ */
+INLINE static void star_formation_copy_properties(
+    const struct part* p, const struct xpart* xp, struct spart* sp,
+    const struct engine* e, const struct star_formation* starform,
+    const struct cosmology* cosmo, const int with_cosmology) {
+
+  /* Store the current mass */
+  sp->mass = hydro_get_mass(p);
+
+  /* Store the current mass as the initial mass */
+  sp->mass_init = hydro_get_mass(p);
+
+  /* Store either the birth_scale_factor or birth_time depending on cosmology */
+  if (with_cosmology) {
+    sp->birth_scale_factor = cosmo->a;
+  } else {
+    sp->birth_time = e->time;
+  }
+
+  /* Store the chemistry struct in the star particle */
+  sp->chemistry_data = p->chemistry_data;
+
+  /* Store the tracers data */
+  sp->tracers_data = xp->tracers_data;
+
+  /* Store the birth density in the star particle */
+  sp->birth_density = hydro_get_physical_density(p, cosmo);
+}
+
+/**
+ * @brief initialization of the star formation law
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us The current internal system of units.
+ * @param hydro_props The properties of the hydro model.
+ * @param starform the star formation law properties to initialize
+ */
+INLINE static void starformation_init_backend(
+    struct swift_params* parameter_file, const struct phys_const* phys_const,
+    const struct unit_system* us, const struct hydro_props* hydro_props,
+    struct star_formation* starform) {
+
+  /* Get the Gravitational constant */
+  const double G_newton = phys_const->const_newton_G;
+
+  /* Initial Hydrogen abundance (mass fraction) */
+  const double X_H = hydro_props->hydrogen_mass_fraction;
+
+  /* Mean molecular weight assuming neutral gas */
+  const double mean_molecular_weight = hydro_props->mu_neutral;
+
+  /* Get the surface density unit Msun / pc^2 in internal units */
+  const double Msun_per_pc2 =
+      phys_const->const_solar_mass /
+      (phys_const->const_parsec * phys_const->const_parsec);
+
+  /* Get the SF surface density unit Msun / kpc^2 / yr in internal units */
+  const double kpc = 1000. * phys_const->const_parsec;
+  const double Msun_per_kpc2_per_year =
+      phys_const->const_solar_mass / (kpc * kpc) / phys_const->const_year;
+
+  /* Conversion of number density from cgs */
+  const double number_density_from_cgs =
+      1. / units_cgs_conversion_factor(us, UNIT_CONV_NUMBER_DENSITY);
+
+  /* Quantities that have to do with the Normal Kennicutt-
+   * Schmidt law will be read in this part of the code*/
+
+  /* Load the equation of state for this model */
+  starform->EOS_polytropic_index = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:EOS_gamma_effective");
+  starform->EOS_temperature_norm_K = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:EOS_temperature_norm_K");
+  starform->EOS_density_norm_HpCM3 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:EOS_density_norm_H_p_cm3");
+  starform->EOS_density_c =
+      starform->EOS_density_norm_HpCM3 * number_density_from_cgs;
+  starform->EOS_density_c_inv = 1. / starform->EOS_density_c;
+
+  /* Calculate the EOS pressure normalization */
+  starform->EOS_pressure_c =
+      starform->EOS_density_c * starform->EOS_temperature_norm_K *
+      phys_const->const_boltzmann_k / mean_molecular_weight / X_H;
+
+  /* Normalisation of the temperature in the EOS calculation */
+  starform->EOS_temperature_c =
+      starform->EOS_pressure_c / phys_const->const_boltzmann_k;
+  starform->EOS_temperature_c *=
+      pow(starform->EOS_density_c, starform->EOS_polytropic_index);
+
+  /* Read the critical density contrast from the parameter file*/
+  starform->min_over_den = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:KS_min_over_density");
+
+  /* Read the gas fraction from the file */
+  starform->fgas = parser_get_opt_param_double(
+      parameter_file, "EAGLEStarFormation:gas_fraction", 1.);
+
+  /* Read the Kennicutt-Schmidt power law exponent */
+  starform->KS_power_law =
+      parser_get_param_double(parameter_file, "EAGLEStarFormation:KS_exponent");
+
+  /* Calculate the power law of the corresponding star formation Schmidt law */
+  starform->SF_power_law = (starform->KS_power_law - 1.) / 2.;
+
+  /* Read the normalization of the KS law in KS law units */
+  starform->KS_normalization_MSUNpYRpKPC2 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:KS_normalisation");
+
+  /* Convert to internal units */
+  starform->KS_normalization =
+      starform->KS_normalization_MSUNpYRpKPC2 * Msun_per_kpc2_per_year;
+
+  /* Calculate the starformation pre-factor (eq. 12 of Schaye & Dalla Vecchia
+   * 2008) */
+  starform->SF_normalization =
+      starform->KS_normalization * pow(Msun_per_pc2, -starform->KS_power_law) *
+      pow(hydro_gamma * starform->fgas / G_newton, starform->SF_power_law);
+
+  /* Read the high density Kennicutt-Schmidt power law exponent */
+  starform->KS_high_den_power_law = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:KS_high_density_exponent");
+
+  /* Calculate the SF high density power law */
+  starform->SF_high_den_power_law = (starform->KS_high_den_power_law - 1.) / 2.;
+
+  /* Read the high density criteria for the KS law in number density per cm^3 */
+  starform->KS_high_den_thresh_HpCM3 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:KS_high_density_threshold_H_p_cm3");
+
+  /* Transform the KS high density criteria to simulation units */
+  starform->KS_high_den_thresh =
+      starform->KS_high_den_thresh_HpCM3 * number_density_from_cgs;
+
+  /* Pressure at the high-density threshold */
+  const double EOS_high_den_pressure =
+      EOS_pressure(starform->KS_high_den_thresh, starform);
+
+  /* Calculate the KS high density normalization
+   * We want the SF law to be continuous so the normalisation of the second
+   * power-law is the value of the first power-law at the high-density threshold
+   */
+  starform->KS_high_den_normalization =
+      starform->KS_normalization *
+      pow(Msun_per_pc2,
+          starform->KS_high_den_power_law - starform->KS_power_law) *
+      pow(hydro_gamma * EOS_high_den_pressure * starform->fgas / G_newton,
+          (starform->KS_power_law - starform->KS_high_den_power_law) * 0.5f);
+
+  /* Calculate the SF high density normalization */
+  starform->SF_high_den_normalization =
+      starform->KS_high_den_normalization *
+      pow(Msun_per_pc2, -starform->KS_high_den_power_law) *
+      pow(hydro_gamma * starform->fgas / G_newton,
+          starform->SF_high_den_power_law);
+
+  /* Get the maximum physical density for SF */
+  starform->max_gas_density_HpCM3 = parser_get_opt_param_double(
+      parameter_file, "EAGLEStarFormation:KS_max_density_threshold_H_p_cm3",
+      FLT_MAX);
+
+  /* Convert the maximum physical density to internal units */
+  starform->max_gas_density =
+      starform->max_gas_density_HpCM3 * number_density_from_cgs;
+
+  starform->temperature_margin_threshold_dex = parser_get_opt_param_double(
+      parameter_file, "EAGLEStarFormation:KS_temperature_margin_dex", FLT_MAX);
+
+  starform->ten_to_temperature_margin_threshold_dex =
+      exp10(starform->temperature_margin_threshold_dex);
+
+  /* Read the normalization of the metallicity dependent critical
+   * density*/
+  starform->density_threshold_HpCM3 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:threshold_norm_H_p_cm3");
+
+  /* Convert to internal units */
+  starform->density_threshold =
+      starform->density_threshold_HpCM3 * number_density_from_cgs;
+
+  /* Read the scale metallicity Z0 */
+  starform->Z0 = parser_get_param_double(parameter_file,
+                                         "EAGLEStarFormation:threshold_Z0");
+  starform->Z0_inv = 1. / starform->Z0;
+
+  /* Read the power law of the critical density scaling */
+  starform->n_Z0 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:threshold_slope");
+
+  /* Read the maximum allowed density for star formation */
+  starform->density_threshold_max_HpCM3 = parser_get_param_double(
+      parameter_file, "EAGLEStarFormation:threshold_max_density_H_p_cm3");
+
+  /* Convert to internal units */
+  starform->density_threshold_max =
+      starform->density_threshold_max_HpCM3 * number_density_from_cgs;
+}
+
+/**
+ * @brief Prints the used parameters of the star formation law
+ *
+ * @param starform the star formation law properties.
+ * */
+INLINE static void starformation_print_backend(
+    const struct star_formation* starform) {
+
+  message("Star formation law is EAGLE (Schaye & Dalla Vecchia 2008)");
+  message(
+      "With properties: normalization = %e Msun/kpc^2/yr, slope of the"
+      "Kennicutt-Schmidt law = %e and gas fraction = %e ",
+      starform->KS_normalization_MSUNpYRpKPC2, starform->KS_power_law,
+      starform->fgas);
+  message("At densities of %e H/cm^3 the slope changes to %e.",
+          starform->KS_high_den_thresh_HpCM3, starform->KS_high_den_power_law);
+  message(
+      "The effective equation of state is given by: polytropic "
+      "index = %e , normalization density = %e #/cm^3 and normalization "
+      "temperature = %e K",
+      starform->EOS_polytropic_index, starform->EOS_density_norm_HpCM3,
+      starform->EOS_temperature_norm_K);
+  message("Density threshold follows Schaye (2004)");
+  message(
+      "the normalization of the density threshold is given by"
+      " %e #/cm^3, with metallicity slope of %e, and metallicity normalization"
+      " of %e, the maximum density threshold is given by %e #/cm^3",
+      starform->density_threshold_HpCM3, starform->n_Z0, starform->Z0,
+      starform->density_threshold_max_HpCM3);
+  message("Temperature threshold is given by Dalla Vecchia and Schaye (2012)");
+  message("The temperature threshold offset from the EOS is given by: %e dex",
+          starform->temperature_margin_threshold_dex);
+  message("Running with a maximum gas density given by: %e #/cm^3",
+          starform->max_gas_density_HpCM3);
+}
+
+/**
+ * @brief Finishes the density calculation.
+ *
+ * Nothing to do here. We do not need to compute any quantity in the hydro
+ * density loop for the EAGLE star formation model.
+ *
+ * @param p The particle to act upon
+ * @param cd The global star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_end_density(
+    struct part* restrict p, const struct star_formation* cd,
+    const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
+ *
+ * Nothing to do here. We do not need to compute any quantity in the hydro
+ * density loop for the EAGLE star formation model.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cd #star_formation containing star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_part_has_no_neighbours(struct part* restrict p,
+                                      struct xpart* restrict xp,
+                                      const struct star_formation* cd,
+                                      const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here.
+ *
+ * @param phys_const The physical constant in internal units.
+ * @param us The unit system.
+ * @param cosmo The current cosmological model.
+ * @param data The global star_formation information used for this run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_first_init_part(const struct phys_const* restrict phys_const,
+                               const struct unit_system* restrict us,
+                               const struct cosmology* restrict cosmo,
+                               const struct star_formation* data,
+                               const struct part* restrict p,
+                               struct xpart* restrict xp) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here. We do not need to compute any quantity in the hydro
+ * density loop for the EAGLE star formation model.
+ *
+ * @param p Pointer to the particle data.
+ * @param data The global star_formation information.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_init_part(
+    struct part* restrict p, const struct star_formation* data) {}
+
+#endif /* SWIFT_EAGLE_STAR_FORMATION_H */
diff --git a/src/star_formation/EAGLE/star_formation_iact.h b/src/star_formation/EAGLE/star_formation_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab917cbe7aa67cad93a92a4b24212c5f1dcf3aeb
--- /dev/null
+++ b/src/star_formation/EAGLE/star_formation_iact.h
@@ -0,0 +1,71 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STAR_FORMATION_IACT_H
+#define SWIFT_EAGLE_STAR_FORMATION_IACT_H
+
+/**
+ * @file EAGLE/star_formation_iact.h
+ * @brief Density computation
+ */
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (symmetric
+ * version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_star_formation(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {
+
+  /* Nothing to do here. We do not need to compute any quantity in the hydro
+     density loop for the EAGLE star formation model. */
+}
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (non
+ * symmetric version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_star_formation(float r2, const float *dx, float hi, float hj,
+                                  struct part *restrict pi,
+                                  const struct part *restrict pj, float a,
+                                  float H) {
+
+  /* Nothing to do here. We do not need to compute any quantity in the hydro
+     density loop for the EAGLE star formation model. */
+}
+
+#endif /* SWIFT_EAGLE_STAR_FORMATION_IACT_H */
diff --git a/src/star_formation/EAGLE/star_formation_io.h b/src/star_formation/EAGLE/star_formation_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..cee96326e458d0581af6e62e452ac433dcf407bd
--- /dev/null
+++ b/src/star_formation/EAGLE/star_formation_io.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_EAGLE_IO_H
+#define SWIFT_STAR_FORMATION_EAGLE_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "io_properties.h"
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended data particle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int star_formation_write_particles(
+    const struct part* parts, const struct xpart* xparts,
+    struct io_props* list) {
+
+  list[0] =
+      io_make_output_field("SFR", FLOAT, 1, UNIT_CONV_SFR, xparts, sf_data.SFR);
+
+  return 1;
+}
+
+#endif /* SWIFT_STAR_FORMATION_EAGLE_IO_H */
diff --git a/src/star_formation/EAGLE/star_formation_struct.h b/src/star_formation/EAGLE/star_formation_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..41247e160a3eddbc9184c59b67cfa2a1d7259a05
--- /dev/null
+++ b/src/star_formation/EAGLE/star_formation_struct.h
@@ -0,0 +1,32 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STAR_FORMATION_STRUCT_H
+#define SWIFT_EAGLE_STAR_FORMATION_STRUCT_H
+
+/**
+ * @brief Star-formation-related properties stored in the extended particle
+ * data.
+ */
+struct star_formation_xpart_data {
+
+  /*! Star formation rate */
+  float SFR;
+};
+
+#endif /* SWIFT_EAGLE_STAR_FORMATION_STRUCT_H */
diff --git a/src/star_formation/GEAR/star_formation.h b/src/star_formation/GEAR/star_formation.h
new file mode 100644
index 0000000000000000000000000000000000000000..420cecb6f6cdfa6be8d0803064122fa539131d0c
--- /dev/null
+++ b/src/star_formation/GEAR/star_formation.h
@@ -0,0 +1,215 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *******************************************************************************/
+#ifndef SWIFT_GEAR_STAR_FORMATION_H
+#define SWIFT_GEAR_STAR_FORMATION_H
+
+/* Local includes */
+#include "cosmology.h"
+#include "error.h"
+#include "hydro_properties.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "units.h"
+
+/**
+ * @brief Calculate if the gas has the potential of becoming
+ * a star.
+ *
+ * No star formation should occur, so return 0.
+ *
+ * @param starform the star formation law properties to use.
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cooling The cooling data struct.
+ *
+ */
+INLINE static int star_formation_is_star_forming(
+    const struct part* restrict p, const struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cooling_function_data* restrict cooling) {
+
+  return 0;
+}
+
+/**
+ * @brief Compute the star-formation rate of a given particle and store
+ * it into the #xpart.
+ *
+ * Nothing to do here.
+ *
+ * @param p #part.
+ * @param xp the #xpart.
+ * @param starform the star formation law properties to use
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param dt_star The time-step of this particle.
+ */
+INLINE static void star_formation_compute_SFR(
+    const struct part* restrict p, struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo, const double dt_star) {}
+
+/**
+ * @brief Decides whether a particle should be converted into a
+ * star or not.
+ *
+ * No SF should occur, so return 0.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param starform The properties of the star formation model.
+ * @param e The #engine (for random numbers).
+ * @param dt_star The time-step of this particle
+ * @return 1 if a conversion should be done, 0 otherwise.
+ */
+INLINE static int star_formation_should_convert_to_star(
+    const struct part* p, const struct xpart* xp,
+    const struct star_formation* starform, const struct engine* e,
+    const double dt_star) {
+
+  return 0;
+}
+
+/**
+ * @brief Update the SF properties of a particle that is not star forming.
+ *
+ * Nothing to do here.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param e The #engine.
+ * @param starform The properties of the star formation model.
+ * @param with_cosmology Are we running with cosmology switched on?
+ */
+INLINE static void star_formation_update_part_not_SFR(
+    struct part* p, struct xpart* xp, const struct engine* e,
+    const struct star_formation* starform, const int with_cosmology) {}
+
+/**
+ * @brief Copies the properties of the gas particle over to the
+ * star particle.
+ *
+ * Nothing to do here.
+ *
+ * @param e The #engine
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param sp the new created star particle with its properties.
+ * @param starform the star formation law properties to use.
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param with_cosmology if we run with cosmology.
+ */
+INLINE static void star_formation_copy_properties(
+    const struct part* p, const struct xpart* xp, struct spart* sp,
+    const struct engine* e, const struct star_formation* starform,
+    const struct cosmology* cosmo, const int with_cosmology) {}
+
+/**
+ * @brief initialization of the star formation law
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us The current internal system of units
+ * @param starform the star formation law properties to initialize
+ *
+ */
+INLINE static void starformation_init_backend(
+    struct swift_params* parameter_file, const struct phys_const* phys_const,
+    const struct unit_system* us, const struct hydro_props* hydro_props,
+    const struct star_formation* starform) {}
+
+/**
+ * @brief Prints the used parameters of the star formation law
+ *
+ * @param starform the star formation law properties.
+ */
+INLINE static void starformation_print_backend(
+    const struct star_formation* starform) {
+
+  message("Star formation law is 'GEAR'");
+}
+
+/**
+ * @brief Finishes the density calculation.
+ *
+ * @param p The particle to act upon
+ * @param cd The global star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_end_density(
+    struct part* restrict p, const struct star_formation* cd,
+    const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cd #star_formation containing star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_part_has_no_neighbours(struct part* restrict p,
+                                      struct xpart* restrict xp,
+                                      const struct star_formation* cd,
+                                      const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here.
+ *
+ * @param phys_const The physical constant in internal units.
+ * @param us The unit system.
+ * @param cosmo The current cosmological model.
+ * @param data The global star_formation information used for this run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_first_init_part(const struct phys_const* restrict phys_const,
+                               const struct unit_system* restrict us,
+                               const struct cosmology* restrict cosmo,
+                               const struct star_formation* data,
+                               const struct part* restrict p,
+                               struct xpart* restrict xp) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here.
+ *
+ * @param p Pointer to the particle data.
+ * @param data The global star_formation information.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_init_part(
+    struct part* restrict p, const struct star_formation* data) {}
+
+#endif /* SWIFT_GEAR_STAR_FORMATION_H */
diff --git a/src/star_formation/GEAR/star_formation_iact.h b/src/star_formation/GEAR/star_formation_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..749b608068650a27cbe4c9a0ca4126d2740337f3
--- /dev/null
+++ b/src/star_formation/GEAR/star_formation_iact.h
@@ -0,0 +1,63 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_GEAR_STAR_FORMATION_IACT_H
+#define SWIFT_GEAR_STAR_FORMATION_IACT_H
+
+/**
+ * @file GEAR/star_formation_iact.h
+ * @brief Density computation
+ */
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (symmetric
+ * version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_star_formation(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {}
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (non
+ * symmetric version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_star_formation(float r2, const float *dx, float hi, float hj,
+                                  struct part *restrict pi,
+                                  const struct part *restrict pj, float a,
+                                  float H) {}
+
+#endif /* SWIFT_GEAR_STAR_FORMATION_IACT_H */
diff --git a/src/star_formation/GEAR/star_formation_io.h b/src/star_formation/GEAR/star_formation_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ef04c49c4abcd00175aaa164271628a9ff89360
--- /dev/null
+++ b/src/star_formation/GEAR/star_formation_io.h
@@ -0,0 +1,44 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_GEAR_IO_H
+#define SWIFT_STAR_FORMATION_GEAR_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "io_properties.h"
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended data particle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int star_formation_write_particles(
+    const struct part* parts, const struct xpart* xparts,
+    struct io_props* list) {
+
+  return 0;
+}
+
+#endif /* SWIFT_STAR_FORMATION_GEAR_IO_H */
diff --git a/src/star_formation/GEAR/star_formation_struct.h b/src/star_formation/GEAR/star_formation_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..9b4e216fd2955f29d89dade6ee46c0e1af715cdb
--- /dev/null
+++ b/src/star_formation/GEAR/star_formation_struct.h
@@ -0,0 +1,31 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_GEAR_STAR_FORMATION_STRUCT_H
+#define SWIFT_GEAR_STAR_FORMATION_STRUCT_H
+
+/**
+ * @brief Star-formation-related properties stored in the extended particle
+ * data.
+ */
+struct star_formation_xpart_data {};
+
+/* Starformation struct */
+struct star_formation {};
+
+#endif /* SWIFT_GEAR_STAR_FORMATION_STRUCT_H */
diff --git a/src/star_formation/none/star_formation.h b/src/star_formation/none/star_formation.h
new file mode 100644
index 0000000000000000000000000000000000000000..7dbe5b20cc401c0284036f3c973c9b65fcec8d2e
--- /dev/null
+++ b/src/star_formation/none/star_formation.h
@@ -0,0 +1,218 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ *******************************************************************************/
+#ifndef SWIFT_NONE_STAR_FORMATION_H
+#define SWIFT_NONE_STAR_FORMATION_H
+
+/* Local includes */
+#include "cosmology.h"
+#include "error.h"
+#include "hydro_properties.h"
+#include "parser.h"
+#include "part.h"
+#include "physical_constants.h"
+#include "units.h"
+
+/* Starformation struct */
+struct star_formation {};
+
+/**
+ * @brief Calculate if the gas has the potential of becoming
+ * a star.
+ *
+ * No star formation should occur, so return 0.
+ *
+ * @param starform the star formation law properties to use.
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param hydro_props The properties of the hydro scheme.
+ * @param us The internal system of units.
+ * @param cooling The cooling data struct.
+ *
+ */
+INLINE static int star_formation_is_star_forming(
+    const struct part* restrict p, const struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo,
+    const struct hydro_props* restrict hydro_props,
+    const struct unit_system* restrict us,
+    const struct cooling_function_data* restrict cooling) {
+
+  return 0;
+}
+
+/**
+ * @brief Compute the star-formation rate of a given particle and store
+ * it into the #xpart.
+ *
+ * Nothing to do here.
+ *
+ * @param p #part.
+ * @param xp the #xpart.
+ * @param starform the star formation law properties to use
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param dt_star The time-step of this particle.
+ */
+INLINE static void star_formation_compute_SFR(
+    const struct part* restrict p, struct xpart* restrict xp,
+    const struct star_formation* starform, const struct phys_const* phys_const,
+    const struct cosmology* cosmo, const double dt_star) {}
+
+/**
+ * @brief Decides whether a particle should be converted into a
+ * star or not.
+ *
+ * No SF should occur, so return 0.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param starform The properties of the star formation model.
+ * @param e The #engine (for random numbers).
+ * @param dt_star The time-step of this particle
+ * @return 1 if a conversion should be done, 0 otherwise.
+ */
+INLINE static int star_formation_should_convert_to_star(
+    const struct part* p, const struct xpart* xp,
+    const struct star_formation* starform, const struct engine* e,
+    const double dt_star) {
+
+  return 0;
+}
+
+/**
+ * @brief Update the SF properties of a particle that is not star forming.
+ *
+ * Nothing to do here.
+ *
+ * @param p The #part.
+ * @param xp The #xpart.
+ * @param e The #engine.
+ * @param starform The properties of the star formation model.
+ * @param with_cosmology Are we running with cosmology switched on?
+ */
+INLINE static void star_formation_update_part_not_SFR(
+    struct part* p, struct xpart* xp, const struct engine* e,
+    const struct star_formation* starform, const int with_cosmology) {}
+
+/**
+ * @brief Copies the properties of the gas particle over to the
+ * star particle.
+ *
+ * Nothing to do here.
+ *
+ * @param e The #engine
+ * @param p the gas particles.
+ * @param xp the additional properties of the gas particles.
+ * @param sp the new created star particle with its properties.
+ * @param starform the star formation law properties to use.
+ * @param phys_const the physical constants in internal units.
+ * @param cosmo the cosmological parameters and properties.
+ * @param with_cosmology if we run with cosmology.
+ */
+INLINE static void star_formation_copy_properties(
+    const struct part* p, const struct xpart* xp, struct spart* sp,
+    const struct engine* e, const struct star_formation* starform,
+    const struct cosmology* cosmo, const int with_cosmology) {}
+
+/**
+ * @brief initialization of the star formation law
+ *
+ * @param parameter_file The parsed parameter file
+ * @param phys_const Physical constants in internal units
+ * @param us The current internal system of units
+ * @param starform the star formation law properties to initialize
+ *
+ */
+INLINE static void starformation_init_backend(
+    struct swift_params* parameter_file, const struct phys_const* phys_const,
+    const struct unit_system* us, const struct hydro_props* hydro_props,
+    const struct star_formation* starform) {}
+
+/**
+ * @brief Prints the used parameters of the star formation law
+ *
+ * @param starform the star formation law properties.
+ */
+INLINE static void starformation_print_backend(
+    const struct star_formation* starform) {
+
+  message("Star formation law is 'No Star Formation'");
+}
+
+/**
+ * @brief Finishes the density calculation.
+ *
+ * @param p The particle to act upon
+ * @param cd The global star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_end_density(
+    struct part* restrict p, const struct star_formation* cd,
+    const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #part has 0 ngbs.
+ *
+ * @param p The particle to act upon
+ * @param xp The extended particle data to act upon
+ * @param cd #star_formation containing star_formation information.
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_part_has_no_neighbours(struct part* restrict p,
+                                      struct xpart* restrict xp,
+                                      const struct star_formation* cd,
+                                      const struct cosmology* cosmo) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here.
+ *
+ * @param phys_const The physical constant in internal units.
+ * @param us The unit system.
+ * @param cosmo The current cosmological model.
+ * @param data The global star_formation information used for this run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data.
+ */
+__attribute__((always_inline)) INLINE static void
+star_formation_first_init_part(const struct phys_const* restrict phys_const,
+                               const struct unit_system* restrict us,
+                               const struct cosmology* restrict cosmo,
+                               const struct star_formation* data,
+                               const struct part* restrict p,
+                               struct xpart* restrict xp) {}
+
+/**
+ * @brief Sets the star_formation properties of the (x-)particles to a valid
+ * start state.
+ *
+ * Nothing to do here.
+ *
+ * @param p Pointer to the particle data.
+ * @param data The global star_formation information.
+ */
+__attribute__((always_inline)) INLINE static void star_formation_init_part(
+    struct part* restrict p, const struct star_formation* data) {}
+
+#endif /* SWIFT_NONE_STAR_FORMATION_H */
diff --git a/src/star_formation/none/star_formation_iact.h b/src/star_formation/none/star_formation_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd74115bec699029748806b512c9d6bd7fb829fe
--- /dev/null
+++ b/src/star_formation/none/star_formation_iact.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_NONE_STAR_FORMATION_IACT_H
+#define SWIFT_NONE_STAR_FORMATION_IACT_H
+
+/**
+ * @file none/star_formation_iact.h
+ * @brief Density computation
+ */
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (symmetric
+ * version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle.
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void runner_iact_star_formation(
+    float r2, const float *dx, float hi, float hj, struct part *restrict pi,
+    struct part *restrict pj, float a, float H) {}
+
+/**
+ * @brief do star_formation computation after the runner_iact_density (non
+ * symmetric version)
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param pi First particle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_star_formation(float r2, const float *dx, float hi, float hj,
+                                  struct part *restrict pi,
+                                  const struct part *restrict pj, float a,
+                                  float H) {}
+
+#endif /* SWIFT_NONE_STAR_FORMATION_IACT_H */
diff --git a/src/star_formation/none/star_formation_io.h b/src/star_formation/none/star_formation_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..b271926f915279b681aac8348a0e375083901deb
--- /dev/null
+++ b/src/star_formation/none/star_formation_io.h
@@ -0,0 +1,44 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_NONE_IO_H
+#define SWIFT_STAR_FORMATION_NONE_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "io_properties.h"
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended data particle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int star_formation_write_particles(
+    const struct part* parts, const struct xpart* xparts,
+    struct io_props* list) {
+
+  return 0;
+}
+
+#endif /* SWIFT_STAR_FORMATION_NONE_IO_H */
diff --git a/src/star_formation/none/star_formation_struct.h b/src/star_formation/none/star_formation_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..27a2adaf83d0a02a0d08e7eef8b45bea630689e4
--- /dev/null
+++ b/src/star_formation/none/star_formation_struct.h
@@ -0,0 +1,28 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_NONE_STAR_FORMATION_STRUCT_H
+#define SWIFT_NONE_STAR_FORMATION_STRUCT_H
+
+/**
+ * @brief Star-formation-related properties stored in the extended particle
+ * data.
+ */
+struct star_formation_xpart_data {};
+
+#endif /* SWIFT_NONE_STAR_FORMATION_STRUCT_H */
diff --git a/src/star_formation_iact.h b/src/star_formation_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef457214a23102bc33385705db41c89dc29d8b8f
--- /dev/null
+++ b/src/star_formation_iact.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_IACT_H
+#define SWIFT_STAR_FORMATION_IACT_H
+
+/**
+ * @file src/star_formation_iact.h
+ * @brief Branches between the different star formation iact.
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right star formation law definition */
+#if defined(STAR_FORMATION_NONE)
+#include "./star_formation/none/star_formation_iact.h"
+#elif defined(STAR_FORMATION_EAGLE)
+#include "./star_formation/EAGLE/star_formation_iact.h"
+#elif defined(STAR_FORMATION_GEAR)
+#include "./star_formation/GEAR/star_formation_iact.h"
+#else
+#error "Invalid choice of star formation law"
+#endif
+
+#endif /* SWIFT_STAR_FORMATION_IACT_H */
diff --git a/src/star_formation_io.h b/src/star_formation_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c57bdc819fb8b583d217dbd60f402918d8f81ef
--- /dev/null
+++ b/src/star_formation_io.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_IO_H
+#define SWIFT_STAR_FORMATION_IO_H
+
+/**
+ * @file src/star_formation_io.h
+ * @brief Branches between the i/o routines for the SF code.
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right cooling definition */
+#if defined(STAR_FORMATION_NONE)
+#include "./star_formation/none/star_formation_io.h"
+#elif defined(STAR_FORMATION_EAGLE)
+#include "./star_formation/EAGLE/star_formation_io.h"
+#elif defined(STAR_FORMATION_GEAR)
+#include "./star_formation/GEAR/star_formation_io.h"
+#else
+#error "Invalid choice of star formation model."
+#endif
+
+#endif /* SWIFT_STAR_FORMATION_IO_H */
diff --git a/src/star_formation_struct.h b/src/star_formation_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a62d284b435c353525311979b343754856364e8
--- /dev/null
+++ b/src/star_formation_struct.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_STAR_FORMATION_STRUCT_H
+#define SWIFT_STAR_FORMATION_STRUCT_H
+
+/**
+ * @file src/star_formation_struct.h
+ * @brief Branches between the different particle data SF tracers
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right cooling definition */
+#if defined(STAR_FORMATION_NONE)
+#include "./star_formation/none/star_formation_struct.h"
+#elif defined(STAR_FORMATION_EAGLE)
+#include "./star_formation/EAGLE/star_formation_struct.h"
+#elif defined(STAR_FORMATION_GEAR)
+#include "./star_formation/GEAR/star_formation_struct.h"
+#else
+#error "Invalid choice of star formation structure."
+#endif
+
+#endif /* SWIFT_STAR_FORMATION_STRUCT_H */
diff --git a/src/stars.h b/src/stars.h
index ade47ff57298c13bf205e991548945576a802293..fc7ee74d3a2cae91ee209c4008eee4d5dd0f375e 100644
--- a/src/stars.h
+++ b/src/stars.h
@@ -16,15 +16,24 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_STAR_H
-#define SWIFT_STAR_H
+#ifndef SWIFT_STARS_H
+#define SWIFT_STARS_H
 
 /* Config parameters. */
 #include "../config.h"
 
-/* So far only one model here */
-/* Straight-forward import */
-#include "./stars/Default/star.h"
-#include "./stars/Default/star_iact.h"
+/* Select the correct star model */
+#if defined(STARS_NONE)
+#include "./stars/Default/stars.h"
+#include "./stars/Default/stars_iact.h"
+#elif defined(STARS_EAGLE)
+#include "./stars/EAGLE/stars.h"
+#include "./stars/EAGLE/stars_iact.h"
+#elif defined(STARS_GEAR)
+#include "./stars/GEAR/stars.h"
+#include "./stars/GEAR/stars_iact.h"
+#else
+#error "Invalid choice of star model"
+#endif
 
 #endif
diff --git a/src/stars/Default/star.h b/src/stars/Default/star.h
deleted file mode 100644
index 61ae4aeb5c51e18e39c3f4c6855d7c6ddfe05abb..0000000000000000000000000000000000000000
--- a/src/stars/Default/star.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Coypright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-#ifndef SWIFT_DEFAULT_STAR_H
-#define SWIFT_DEFAULT_STAR_H
-
-#include <float.h>
-#include "minmax.h"
-
-/**
- * @brief Computes the gravity time-step of a given star particle.
- *
- * @param sp Pointer to the s-particle data.
- */
-__attribute__((always_inline)) INLINE static float star_compute_timestep(
-    const struct spart* const sp) {
-
-  return FLT_MAX;
-}
-
-/**
- * @brief Initialises the s-particles for the first time
- *
- * This function is called only once just after the ICs have been
- * read in to do some conversions.
- *
- * @param sp The particle to act upon
- */
-__attribute__((always_inline)) INLINE static void star_first_init_spart(
-    struct spart* sp) {
-
-  sp->time_bin = 0;
-}
-
-/**
- * @brief Prepares a s-particle for its interactions
- *
- * @param sp The particle to act upon
- */
-__attribute__((always_inline)) INLINE static void star_init_spart(
-    struct spart* sp) {}
-
-/**
- * @brief Sets the values to be predicted in the drifts to their values at a
- * kick time
- *
- * @param sp The particle.
- */
-__attribute__((always_inline)) INLINE static void star_reset_predicted_values(
-    struct spart* restrict sp) {}
-
-/**
- * @brief Finishes the calculation of (non-gravity) forces acting on stars
- *
- * Multiplies the forces and accelerations by the appropiate constants
- *
- * @param sp The particle to act upon
- */
-__attribute__((always_inline)) INLINE static void star_end_force(
-    struct spart* sp) {}
-
-/**
- * @brief Kick the additional variables
- *
- * @param sp The particle to act upon
- * @param dt The time-step for this kick
- */
-__attribute__((always_inline)) INLINE static void star_kick_extra(
-    struct spart* sp, float dt) {}
-
-#endif /* SWIFT_DEFAULT_STAR_H */
diff --git a/src/stars/Default/star_iact.h b/src/stars/Default/star_iact.h
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/src/stars/Default/star_io.h b/src/stars/Default/star_io.h
deleted file mode 100644
index 7ad29f0a935c002b1337c2a75d6f987c05c9bb43..0000000000000000000000000000000000000000
--- a/src/stars/Default/star_io.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*******************************************************************************
- * This file is part of SWIFT.
- * Coypright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- ******************************************************************************/
-#ifndef SWIFT_DEFAULT_STAR_IO_H
-#define SWIFT_DEFAULT_STAR_IO_H
-
-#include "io_properties.h"
-
-/**
- * @brief Specifies which s-particle fields to read from a dataset
- *
- * @param sparts The s-particle array.
- * @param list The list of i/o properties to read.
- * @param num_fields The number of i/o fields to read.
- */
-INLINE static void star_read_particles(struct spart* sparts,
-                                       struct io_props* list, int* num_fields) {
-
-  /* Say how much we want to read */
-  *num_fields = 4;
-
-  /* List what we want to read */
-  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
-                                UNIT_CONV_LENGTH, sparts, x);
-  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
-                                UNIT_CONV_SPEED, sparts, v);
-  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
-                                sparts, mass);
-  list[3] = io_make_input_field("ParticleIDs", LONGLONG, 1, COMPULSORY,
-                                UNIT_CONV_NO_UNITS, sparts, id);
-}
-
-/**
- * @brief Specifies which s-particle fields to write to a dataset
- *
- * @param sparts The s-particle array.
- * @param list The list of i/o properties to write.
- * @param num_fields The number of i/o fields to write.
- */
-INLINE static void star_write_particles(const struct spart* sparts,
-                                        struct io_props* list,
-                                        int* num_fields) {
-
-  /* Say how much we want to read */
-  *num_fields = 4;
-
-  /* List what we want to read */
-  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
-                                 sparts, x);
-  list[1] =
-      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, sparts, v);
-  list[2] =
-      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, sparts, mass);
-  list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS,
-                                 sparts, id);
-}
-
-#endif /* SWIFT_DEFAULT_STAR_IO_H */
diff --git a/src/stars/Default/stars.h b/src/stars/Default/stars.h
new file mode 100644
index 0000000000000000000000000000000000000000..586a87f75600a08acfd84b0f7ecc57fc4573281f
--- /dev/null
+++ b/src/stars/Default/stars.h
@@ -0,0 +1,192 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Coypright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_DEFAULT_STARS_H
+#define SWIFT_DEFAULT_STARS_H
+
+#include <float.h>
+#include "minmax.h"
+
+/**
+ * @brief Computes the gravity time-step of a given star particle.
+ *
+ * @param sp Pointer to the s-particle data.
+ */
+__attribute__((always_inline)) INLINE static float stars_compute_timestep(
+    const struct spart* const sp) {
+
+  return FLT_MAX;
+}
+
+/**
+ * @brief Initialises the s-particles for the first time
+ *
+ * This function is called only once just after the ICs have been
+ * read in to do some conversions.
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_first_init_spart(
+    struct spart* sp) {
+
+  sp->time_bin = 0;
+}
+
+/**
+ * @brief Prepares a s-particle for its interactions
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_init_spart(
+    struct spart* sp) {
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i)
+    sp->ids_ngbs_density[i] = -1;
+  sp->num_ngb_density = 0;
+#endif
+
+  sp->density.wcount = 0.f;
+  sp->density.wcount_dh = 0.f;
+}
+
+/**
+ * @brief Predict additional particle fields forward in time when drifting
+ *
+ * @param sp The particle
+ * @param dt_drift The drift time-step for positions.
+ */
+__attribute__((always_inline)) INLINE static void stars_predict_extra(
+    struct spart* restrict sp, float dt_drift) {
+
+  // MATTHIEU
+  /* const float h_inv = 1.f / sp->h; */
+
+  /* /\* Predict smoothing length *\/ */
+  /* const float w1 = sp->feedback.h_dt * h_inv * dt_drift; */
+  /* if (fabsf(w1) < 0.2f) */
+  /*   sp->h *= approx_expf(w1); /\* 4th order expansion of exp(w) *\/ */
+  /* else */
+  /*   sp->h *= expf(w1); */
+}
+
+/**
+ * @brief Sets the values to be predicted in the drifts to their values at a
+ * kick time
+ *
+ * @param sp The particle.
+ */
+__attribute__((always_inline)) INLINE static void stars_reset_predicted_values(
+    struct spart* restrict sp) {}
+
+/**
+ * @brief Finishes the calculation of (non-gravity) forces acting on stars
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_end_feedback(
+    struct spart* sp) {
+
+  sp->feedback.h_dt *= sp->h * hydro_dimension_inv;
+}
+
+/**
+ * @brief Kick the additional variables
+ *
+ * @param sp The particle to act upon
+ * @param dt The time-step for this kick
+ */
+__attribute__((always_inline)) INLINE static void stars_kick_extra(
+    struct spart* sp, float dt) {}
+
+/**
+ * @brief Finishes the calculation of density on stars
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void stars_end_density(
+    struct spart* sp, const struct cosmology* cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = sp->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv);       /* 1/h^d */
+  const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */
+
+  /* Finish the calculation by inserting the missing h-factors */
+  sp->density.wcount *= h_inv_dim;
+  sp->density.wcount_dh *= h_inv_dim_plus_one;
+}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #spart has 0
+ * ngbs.
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void stars_spart_has_no_neighbours(
+    struct spart* restrict sp, const struct cosmology* cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = sp->h;
+  const float h_inv = 1.0f / h;                 /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv); /* 1/h^d */
+
+  /* Re-set problematic values */
+  sp->density.wcount = kernel_root * h_inv_dim;
+  sp->density.wcount_dh = 0.f;
+}
+
+/**
+ * @brief Evolve the stellar properties of a #spart.
+ *
+ * This function allows for example to compute the SN rate before sending
+ * this information to a different MPI rank.
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ * @param stars_properties The #stars_props
+ */
+__attribute__((always_inline)) INLINE static void stars_evolve_spart(
+    struct spart* restrict sp, const struct stars_props* stars_properties,
+    const struct cosmology* cosmo) {}
+
+/**
+ * @brief Reset acceleration fields of a particle
+ *
+ * This is the equivalent of hydro_reset_acceleration.
+ * We do not compute the acceleration on star, therefore no need to use it.
+ *
+ * @param p The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_reset_feedback(
+    struct spart* restrict p) {
+
+  /* Reset time derivative */
+  p->feedback.h_dt = 0.f;
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i)
+    p->ids_ngbs_force[i] = -1;
+  p->num_ngb_force = 0;
+#endif
+}
+
+#endif /* SWIFT_DEFAULT_STARS_H */
diff --git a/src/stars/Default/stars_debug.h b/src/stars/Default/stars_debug.h
new file mode 100644
index 0000000000000000000000000000000000000000..39ae754ddf60910ae07b3252e151c1f619588161
--- /dev/null
+++ b/src/stars/Default/stars_debug.h
@@ -0,0 +1,31 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Coypright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_DEFAULT_STARS_DEBUG_H
+#define SWIFT_DEFAULT_STARS_DEBUG_H
+
+__attribute__((always_inline)) INLINE static void stars_debug_particle(
+    const struct spart* p) {
+  printf(
+      "x=[%.3e,%.3e,%.3e], "
+      "v_full=[%.3e,%.3e,%.3e] p->mass=%.3e \n t_begin=%d, t_end=%d\n",
+      p->x[0], p->x[1], p->x[2], p->v_full[0], p->v_full[1], p->v_full[2],
+      p->mass, p->ti_begin, p->ti_end);
+}
+
+#endif /* SWIFT_DEFAULT_STARS_DEBUG_H */
diff --git a/src/stars/Default/stars_iact.h b/src/stars/Default/stars_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a9fa7f792527b44c46ff950ba82b1708ed410ff
--- /dev/null
+++ b/src/stars/Default/stars_iact.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *                    Loic Hausammann (loic.hausammann@epfl.ch)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_DEFAULT_STARS_IACT_H
+#define SWIFT_DEFAULT_STARS_IACT_H
+
+/**
+ * @brief Density interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param si First sparticle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_stars_density(float r2, const float *dx, float hi, float hj,
+                                 struct spart *restrict si,
+                                 const struct part *restrict pj, float a,
+                                 float H) {
+
+  float wi, wi_dx;
+
+  /* Get r and 1/r. */
+  const float r_inv = 1.0f / sqrtf(r2);
+  const float r = r2 * r_inv;
+
+  /* Compute the kernel function */
+  const float hi_inv = 1.0f / hi;
+  const float ui = r * hi_inv;
+  kernel_deval(ui, &wi, &wi_dx);
+
+  /* Compute contribution to the number of neighbours */
+  si->density.wcount += wi;
+  si->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /* Update ngb counters */
+  if (si->num_ngb_density < MAX_NUM_OF_NEIGHBOURS_STARS)
+    si->ids_ngbs_density[si->num_ngb_density] = pj->id;
+
+  /* Update ngb counters */
+  ++si->num_ngb_density;
+#endif
+}
+
+/**
+ * @brief Feedback interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param si First sparticle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_stars_feedback(float r2, const float *dx, float hi, float hj,
+                                  struct spart *restrict si,
+                                  struct part *restrict pj, float a, float H) {
+
+  const float mj = hydro_get_mass(pj);
+  const float rhoj = hydro_get_comoving_density(pj);
+  const float r = sqrtf(r2);
+  const float ri = 1.f / r;
+
+  /* Get the kernel for hi. */
+  float hi_inv = 1.0f / hi;
+  float hid_inv = pow_dimension_plus_one(hi_inv); /* 1/h^(d+1) */
+  float xi = r * hi_inv;
+  float wi, wi_dx;
+  kernel_deval(xi, &wi, &wi_dx);
+  float wi_dr = hid_inv * wi_dx;
+
+  /* Compute dv dot r */
+  float dvdr = (si->v[0] - pj->v[0]) * dx[0] + (si->v[1] - pj->v[1]) * dx[1] +
+               (si->v[2] - pj->v[2]) * dx[2];
+
+  /* Get the time derivative for h. */
+  si->feedback.h_dt -= mj * dvdr * ri / rhoj * wi_dr;
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /* Update ngb counters */
+  if (si->num_ngb_force < MAX_NUM_OF_NEIGHBOURS_STARS)
+    si->ids_ngbs_force[si->num_ngb_force] = pj->id;
+
+  /* Update ngb counters */
+  ++si->num_ngb_force;
+#endif
+}
+
+#endif /* SWIFT_DEFAULT_STARS_IACT_H */
diff --git a/src/stars/Default/stars_io.h b/src/stars/Default/stars_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..26a42b8c0f80beb32695e2cb00716f283289663d
--- /dev/null
+++ b/src/stars/Default/stars_io.h
@@ -0,0 +1,209 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_DEFAULT_STARS_IO_H
+#define SWIFT_DEFAULT_STARS_IO_H
+
+#include "io_properties.h"
+#include "stars_part.h"
+
+/**
+ * @brief Specifies which s-particle fields to read from a dataset
+ *
+ * @param sparts The s-particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
+ */
+INLINE static void stars_read_particles(struct spart *sparts,
+                                        struct io_props *list,
+                                        int *num_fields) {
+
+  /* Say how much we want to read */
+  *num_fields = 5;
+
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, sparts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, sparts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                sparts, mass);
+  list[3] = io_make_input_field("ParticleIDs", LONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, sparts, id);
+  list[4] = io_make_input_field("SmoothingLength", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_LENGTH, sparts, h);
+}
+
+/**
+ * @brief Specifies which s-particle fields to write to a dataset
+ *
+ * @param sparts The s-particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
+ */
+INLINE static void stars_write_particles(const struct spart *sparts,
+                                         struct io_props *list,
+                                         int *num_fields) {
+
+  /* Say how much we want to write */
+  *num_fields = 5;
+
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 sparts, x);
+  list[1] =
+      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, sparts, v);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, sparts, mass);
+  list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS,
+                                 sparts, id);
+  list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 sparts, h);
+
+#ifdef DEBUG_INTERACTIONS_STARS
+
+  list += *num_fields;
+  *num_fields += 4;
+
+  list[0] = io_make_output_field("Num_ngb_density", INT, 1, UNIT_CONV_NO_UNITS,
+                                 sparts, num_ngb_density);
+  list[1] = io_make_output_field("Num_ngb_force", INT, 1, UNIT_CONV_NO_UNITS,
+                                 sparts, num_ngb_force);
+  list[2] = io_make_output_field("Ids_ngb_density", LONGLONG,
+                                 MAX_NUM_OF_NEIGHBOURS_STARS,
+                                 UNIT_CONV_NO_UNITS, sparts, ids_ngbs_density);
+  list[3] = io_make_output_field("Ids_ngb_force", LONGLONG,
+                                 MAX_NUM_OF_NEIGHBOURS_STARS,
+                                 UNIT_CONV_NO_UNITS, sparts, ids_ngbs_force);
+#endif
+}
+
+/**
+ * @brief Initialize the global properties of the stars scheme.
+ *
+ * By default, takes the values provided by the hydro.
+ *
+ * @param sp The #stars_props.
+ * @param phys_const The physical constants in the internal unit system.
+ * @param us The internal unit system.
+ * @param params The parsed parameters.
+ * @param p The already read-in properties of the hydro scheme.
+ */
+INLINE static void stars_props_init(struct stars_props *sp,
+                                    const struct phys_const *phys_const,
+                                    const struct unit_system *us,
+                                    struct swift_params *params,
+                                    const struct hydro_props *p) {
+
+  /* Kernel properties */
+  sp->eta_neighbours = parser_get_opt_param_float(
+      params, "Stars:resolution_eta", p->eta_neighbours);
+
+  /* Tolerance for the smoothing length Newton-Raphson scheme */
+  sp->h_tolerance =
+      parser_get_opt_param_float(params, "Stars:h_tolerance", p->h_tolerance);
+
+  /* Get derived properties */
+  sp->target_neighbours = pow_dimension(sp->eta_neighbours) * kernel_norm;
+  const float delta_eta = sp->eta_neighbours * (1.f + sp->h_tolerance);
+  sp->delta_neighbours =
+      (pow_dimension(delta_eta) - pow_dimension(sp->eta_neighbours)) *
+      kernel_norm;
+
+  /* Number of iterations to converge h */
+  sp->max_smoothing_iterations = parser_get_opt_param_int(
+      params, "Stars:max_ghost_iterations", p->max_smoothing_iterations);
+
+  /* Time integration properties */
+  const float max_volume_change =
+      parser_get_opt_param_float(params, "Stars:max_volume_change", -1);
+  if (max_volume_change == -1)
+    sp->log_max_h_change = p->log_max_h_change;
+  else
+    sp->log_max_h_change = logf(powf(max_volume_change, hydro_dimension_inv));
+}
+
+/**
+ * @brief Print the global properties of the stars scheme.
+ *
+ * @param sp The #stars_props.
+ */
+INLINE static void stars_props_print(const struct stars_props *sp) {
+
+  /* Now stars */
+  message("Stars kernel: %s with eta=%f (%.2f neighbours).", kernel_name,
+          sp->eta_neighbours, sp->target_neighbours);
+
+  message("Stars relative tolerance in h: %.5f (+/- %.4f neighbours).",
+          sp->h_tolerance, sp->delta_neighbours);
+
+  message(
+      "Stars integration: Max change of volume: %.2f "
+      "(max|dlog(h)/dt|=%f).",
+      pow_dimension(expf(sp->log_max_h_change)), sp->log_max_h_change);
+
+  message("Maximal iterations in ghost task set to %d",
+          sp->max_smoothing_iterations);
+}
+
+#if defined(HAVE_HDF5)
+INLINE static void stars_props_print_snapshot(hid_t h_grpstars,
+                                              const struct stars_props *sp) {
+
+  io_write_attribute_s(h_grpstars, "Kernel function", kernel_name);
+  io_write_attribute_f(h_grpstars, "Kernel target N_ngb",
+                       sp->target_neighbours);
+  io_write_attribute_f(h_grpstars, "Kernel delta N_ngb", sp->delta_neighbours);
+  io_write_attribute_f(h_grpstars, "Kernel eta", sp->eta_neighbours);
+  io_write_attribute_f(h_grpstars, "Smoothing length tolerance",
+                       sp->h_tolerance);
+  io_write_attribute_f(h_grpstars, "Volume log(max(delta h))",
+                       sp->log_max_h_change);
+  io_write_attribute_f(h_grpstars, "Volume max change time-step",
+                       pow_dimension(expf(sp->log_max_h_change)));
+  io_write_attribute_i(h_grpstars, "Max ghost iterations",
+                       sp->max_smoothing_iterations);
+}
+#endif
+
+/**
+ * @brief Write a #stars_props struct to the given FILE as a stream of bytes.
+ *
+ * @param p the struct
+ * @param stream the file stream
+ */
+INLINE static void stars_props_struct_dump(const struct stars_props *p,
+                                           FILE *stream) {
+  restart_write_blocks((void *)p, sizeof(struct stars_props), 1, stream,
+                       "starsprops", "stars props");
+}
+
+/**
+ * @brief Restore a stars_props struct from the given FILE as a stream of
+ * bytes.
+ *
+ * @param p the struct
+ * @param stream the file stream
+ */
+INLINE static void stars_props_struct_restore(const struct stars_props *p,
+                                              FILE *stream) {
+  restart_read_blocks((void *)p, sizeof(struct stars_props), 1, stream, NULL,
+                      "stars props");
+}
+
+#endif /* SWIFT_DEFAULT_STARS_IO_H */
diff --git a/src/stars/Default/stars_part.h b/src/stars/Default/stars_part.h
new file mode 100644
index 0000000000000000000000000000000000000000..bed2e14756ff2b2b83dbd1f5de821aae4ca7be51
--- /dev/null
+++ b/src/stars/Default/stars_part.h
@@ -0,0 +1,136 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_DEFAULT_STAR_PART_H
+#define SWIFT_DEFAULT_STAR_PART_H
+
+/* Some standard headers. */
+#include <stdlib.h>
+
+/* Read chemistry */
+#include "chemistry_struct.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Particle fields for the star particles.
+ *
+ * All quantities related to gravity are stored in the associated #gpart.
+ */
+struct spart {
+
+  /*! Particle ID. */
+  long long id;
+
+  /*! Pointer to corresponding gravity part. */
+  struct gpart* gpart;
+
+  /*! Particle position. */
+  double x[3];
+
+  /* Offset between current position and position at last tree rebuild. */
+  float x_diff[3];
+
+  /* Offset between current position and position at last tree rebuild. */
+  float x_diff_sort[3];
+
+  /*! Particle velocity. */
+  float v[3];
+
+  /*! Star mass */
+  float mass;
+
+  /* Particle cutoff radius. */
+  float h;
+
+  /*! Particle time bin */
+  timebin_t time_bin;
+
+  struct {
+
+    /* Number of neighbours. */
+    float wcount;
+
+    /* Number of neighbours spatial derivative. */
+    float wcount_dh;
+
+  } density;
+
+  struct {
+
+    /* Change in smoothing length over time. */
+    float h_dt;
+
+  } feedback;
+
+  /*! Tracer structure */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Chemistry structure */
+  struct chemistry_part_data chemistry_data;
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Time of the last drift */
+  integertime_t ti_drift;
+
+  /* Time of the last kick */
+  integertime_t ti_kick;
+
+#endif
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /*! Number of interactions in the density SELF and PAIR */
+  int num_ngb_density;
+
+  /*! List of interacting particles in the density SELF and PAIR */
+  long long ids_ngbs_density[MAX_NUM_OF_NEIGHBOURS_STARS];
+
+  /*! Number of interactions in the force SELF and PAIR */
+  int num_ngb_force;
+
+  /*! List of interacting particles in the force SELF and PAIR */
+  long long ids_ngbs_force[MAX_NUM_OF_NEIGHBOURS_STARS];
+#endif
+
+} SWIFT_STRUCT_ALIGN;
+
+/**
+ * @brief Contains all the constants and parameters of the stars scheme
+ */
+struct stars_props {
+
+  /*! Resolution parameter */
+  float eta_neighbours;
+
+  /*! Target weighted number of neighbours (for info only) */
+  float target_neighbours;
+
+  /*! Smoothing length tolerance */
+  float h_tolerance;
+
+  /*! Tolerance on neighbour number  (for info only)*/
+  float delta_neighbours;
+
+  /*! Maximal number of iterations to converge h */
+  int max_smoothing_iterations;
+
+  /*! Maximal change of h over one time-step */
+  float log_max_h_change;
+};
+
+#endif /* SWIFT_DEFAULT_STAR_PART_H */
diff --git a/src/stars/EAGLE/stars.h b/src/stars/EAGLE/stars.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea63dd84453d7c02efc1232a2d96ef14af840b29
--- /dev/null
+++ b/src/stars/EAGLE/stars.h
@@ -0,0 +1,196 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STARS_H
+#define SWIFT_EAGLE_STARS_H
+
+#include <float.h>
+#include "minmax.h"
+
+/**
+ * @brief Computes the gravity time-step of a given star particle.
+ *
+ * @param sp Pointer to the s-particle data.
+ */
+__attribute__((always_inline)) INLINE static float stars_compute_timestep(
+    const struct spart* const sp) {
+
+  return FLT_MAX;
+}
+
+/**
+ * @brief Prepares a s-particle for its interactions
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_init_spart(
+    struct spart* sp) {
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i)
+    sp->ids_ngbs_density[i] = -1;
+  sp->num_ngb_density = 0;
+#endif
+
+  sp->density.wcount = 0.f;
+  sp->density.wcount_dh = 0.f;
+  sp->rho_gas = 0.f;
+}
+
+/**
+ * @brief Initialises the s-particles for the first time
+ *
+ * This function is called only once just after the ICs have been
+ * read in to do some conversions.
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_first_init_spart(
+    struct spart* sp) {
+
+  sp->time_bin = 0;
+  sp->birth_density = -1.f;
+  sp->birth_time = -1.f;
+
+  stars_init_spart(sp);
+}
+
+/**
+ * @brief Predict additional particle fields forward in time when drifting
+ *
+ * @param sp The particle
+ * @param dt_drift The drift time-step for positions.
+ */
+__attribute__((always_inline)) INLINE static void stars_predict_extra(
+    struct spart* restrict sp, float dt_drift) {
+
+  // MATTHIEU
+  /* const float h_inv = 1.f / sp->h; */
+
+  /* /\* Predict smoothing length *\/ */
+  /* const float w1 = sp->feedback.h_dt * h_inv * dt_drift; */
+  /* if (fabsf(w1) < 0.2f) */
+  /*   sp->h *= approx_expf(w1); /\* 4th order expansion of exp(w) *\/ */
+  /* else */
+  /*   sp->h *= expf(w1); */
+}
+
+/**
+ * @brief Sets the values to be predicted in the drifts to their values at a
+ * kick time
+ *
+ * @param sp The particle.
+ */
+__attribute__((always_inline)) INLINE static void stars_reset_predicted_values(
+    struct spart* restrict sp) {}
+
+/**
+ * @brief Finishes the calculation of (non-gravity) forces acting on stars
+ *
+ * Multiplies the forces and accelerations by the appropriate constants
+ *
+ * @param sp The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_end_feedback(
+    struct spart* sp) {
+
+  sp->feedback.h_dt *= sp->h * hydro_dimension_inv;
+}
+
+/**
+ * @brief Kick the additional variables
+ *
+ * @param sp The particle to act upon
+ * @param dt The time-step for this kick
+ */
+__attribute__((always_inline)) INLINE static void stars_kick_extra(
+    struct spart* sp, float dt) {}
+
+/**
+ * @brief Finishes the calculation of density on stars
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void stars_end_density(
+    struct spart* sp, const struct cosmology* cosmo) {
+
+  /* Some smoothing length multiples. */
+  const float h = sp->h;
+  const float h_inv = 1.0f / h;                       /* 1/h */
+  const float h_inv_dim = pow_dimension(h_inv);       /* 1/h^d */
+  const float h_inv_dim_plus_one = h_inv_dim * h_inv; /* 1/h^(d+1) */
+
+  /* Finish the calculation by inserting the missing h-factors */
+  sp->rho_gas *= h_inv_dim;
+  sp->density.wcount *= h_inv_dim;
+  sp->density.wcount_dh *= h_inv_dim_plus_one;
+}
+
+/**
+ * @brief Sets all particle fields to sensible values when the #spart has 0
+ * ngbs.
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ */
+__attribute__((always_inline)) INLINE static void stars_spart_has_no_neighbours(
+    struct spart* restrict sp, const struct cosmology* cosmo) {
+
+  /* Re-set problematic values */
+  sp->density.wcount = 0.f;
+  sp->density.wcount_dh = 0.f;
+  sp->rho_gas = 0.f;
+}
+
+/**
+ * @brief Evolve the stellar properties of a #spart.
+ *
+ * This function allows for example to compute the SN rate before sending
+ * this information to a different MPI rank.
+ *
+ * @param sp The particle to act upon
+ * @param cosmo The current cosmological model.
+ * @param stars_properties The #stars_props
+ */
+__attribute__((always_inline)) INLINE static void stars_evolve_spart(
+    struct spart* restrict sp, const struct stars_props* stars_properties,
+    const struct cosmology* cosmo) {}
+
+/**
+ * @brief Reset acceleration fields of a particle
+ *
+ * This is the equivalent of hydro_reset_acceleration.
+ * We do not compute the acceleration on star, therefore no need to use it.
+ *
+ * @param p The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_reset_feedback(
+    struct spart* restrict p) {
+
+  /* Reset time derivative */
+  p->feedback.h_dt = 0.f;
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  for (int i = 0; i < MAX_NUM_OF_NEIGHBOURS_STARS; ++i)
+    p->ids_ngbs_force[i] = -1;
+  p->num_ngb_force = 0;
+#endif
+}
+
+#endif /* SWIFT_EAGLE_STARS_H */
diff --git a/src/stars/Default/star_debug.h b/src/stars/EAGLE/stars_debug.h
similarity index 86%
rename from src/stars/Default/star_debug.h
rename to src/stars/EAGLE/stars_debug.h
index d940afac2eb67c97481f48a4bda6fa56085166d5..6bdba45e3b090ccd2eb207bf92a374ce531ee3e0 100644
--- a/src/stars/Default/star_debug.h
+++ b/src/stars/EAGLE/stars_debug.h
@@ -16,10 +16,10 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_DEFAULT_STAR_DEBUG_H
-#define SWIFT_DEFAULT_STAR_DEBUG_H
+#ifndef SWIFT_EAGLE_STARS_DEBUG_H
+#define SWIFT_EAGLE_STARS_DEBUG_H
 
-__attribute__((always_inline)) INLINE static void star_debug_particle(
+__attribute__((always_inline)) INLINE static void stars_debug_particle(
     const struct spart* p) {
   printf(
       "x=[%.3e,%.3e,%.3e], "
@@ -28,4 +28,4 @@ __attribute__((always_inline)) INLINE static void star_debug_particle(
       p->mass, p->ti_begin, p->ti_end);
 }
 
-#endif /* SWIFT_DEFAULT_STAR_DEBUG_H */
+#endif /* SWIFT_EAGLE_STARS_DEBUG_H */
diff --git a/src/stars/EAGLE/stars_iact.h b/src/stars/EAGLE/stars_iact.h
new file mode 100644
index 0000000000000000000000000000000000000000..aad611f50424a5ff8965835d4a2363d49406eb1c
--- /dev/null
+++ b/src/stars/EAGLE/stars_iact.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STARS_IACT_H
+#define SWIFT_EAGLE_STARS_IACT_H
+
+/**
+ * @brief Density interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param si First sparticle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_stars_density(float r2, const float *dx, float hi, float hj,
+                                 struct spart *restrict si,
+                                 const struct part *restrict pj, float a,
+                                 float H) {
+
+  /* Get the gas mass. */
+  const float mj = hydro_get_mass(pj);
+
+  float wi, wi_dx;
+
+  /* Get r and 1/r. */
+  const float r_inv = 1.0f / sqrtf(r2);
+  const float r = r2 * r_inv;
+
+  /* Compute the kernel function */
+  const float hi_inv = 1.0f / hi;
+  const float ui = r * hi_inv;
+  kernel_deval(ui, &wi, &wi_dx);
+
+  /* Compute contribution to the number of neighbours */
+  si->density.wcount += wi;
+  si->density.wcount_dh -= (hydro_dimension * wi + ui * wi_dx);
+
+  /* Compute contribution to the density */
+  si->rho_gas += mj * wi;
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /* Update ngb counters */
+  if (si->num_ngb_density < MAX_NUM_OF_NEIGHBOURS_STARS)
+    si->ids_ngbs_density[si->num_ngb_density] = pj->id;
+  ++si->num_ngb_density;
+#endif
+}
+
+/**
+ * @brief Feedback interaction between two particles (non-symmetric).
+ *
+ * @param r2 Comoving square distance between the two particles.
+ * @param dx Comoving vector separating both particles (pi - pj).
+ * @param hi Comoving smoothing-length of particle i.
+ * @param hj Comoving smoothing-length of particle j.
+ * @param si First sparticle.
+ * @param pj Second particle (not updated).
+ * @param a Current scale factor.
+ * @param H Current Hubble parameter.
+ */
+__attribute__((always_inline)) INLINE static void
+runner_iact_nonsym_stars_feedback(float r2, const float *dx, float hi, float hj,
+                                  struct spart *restrict si,
+                                  struct part *restrict pj, float a, float H) {}
+
+#endif /* SWIFT_EAGLE_STARS_IACT_H */
diff --git a/src/stars/EAGLE/stars_io.h b/src/stars/EAGLE/stars_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..d93b4bf7cf81c56bb65d7d5d8801f843a492ead0
--- /dev/null
+++ b/src/stars/EAGLE/stars_io.h
@@ -0,0 +1,204 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STARS_IO_H
+#define SWIFT_EAGLE_STARS_IO_H
+
+#include "io_properties.h"
+#include "stars_part.h"
+
+/**
+ * @brief Specifies which s-particle fields to read from a dataset
+ *
+ * @param sparts The s-particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
+ */
+INLINE static void stars_read_particles(struct spart *sparts,
+                                        struct io_props *list,
+                                        int *num_fields) {
+
+  /* Say how much we want to read */
+  *num_fields = 5;
+
+  /* List what we want to read */
+  list[0] = io_make_input_field("Coordinates", DOUBLE, 3, COMPULSORY,
+                                UNIT_CONV_LENGTH, sparts, x);
+  list[1] = io_make_input_field("Velocities", FLOAT, 3, COMPULSORY,
+                                UNIT_CONV_SPEED, sparts, v);
+  list[2] = io_make_input_field("Masses", FLOAT, 1, COMPULSORY, UNIT_CONV_MASS,
+                                sparts, mass);
+  list[3] = io_make_input_field("ParticleIDs", LONGLONG, 1, COMPULSORY,
+                                UNIT_CONV_NO_UNITS, sparts, id);
+  list[4] = io_make_input_field("SmoothingLength", FLOAT, 1, OPTIONAL,
+                                UNIT_CONV_LENGTH, sparts, h);
+}
+
+/**
+ * @brief Specifies which s-particle fields to write to a dataset
+ *
+ * @param sparts The s-particle array.
+ * @param list The list of i/o properties to write.
+ * @param num_fields The number of i/o fields to write.
+ */
+INLINE static void stars_write_particles(const struct spart *sparts,
+                                         struct io_props *list,
+                                         int *num_fields) {
+
+  /* Say how much we want to write */
+  *num_fields = 9;
+
+  /* List what we want to write */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 sparts, x);
+  list[1] =
+      io_make_output_field("Velocities", FLOAT, 3, UNIT_CONV_SPEED, sparts, v);
+  list[2] =
+      io_make_output_field("Masses", FLOAT, 1, UNIT_CONV_MASS, sparts, mass);
+  list[3] = io_make_output_field("ParticleIDs", LONGLONG, 1, UNIT_CONV_NO_UNITS,
+                                 sparts, id);
+  list[4] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
+                                 sparts, h);
+  list[5] = io_make_output_field("BirthDensity", FLOAT, 1, UNIT_CONV_DENSITY,
+                                 sparts, birth_density);
+  list[6] = io_make_output_field("InitialMasses", FLOAT, 1, UNIT_CONV_MASS,
+                                 sparts, mass_init);
+  list[7] = io_make_output_field("BirthTime", FLOAT, 1, UNIT_CONV_TIME, sparts,
+                                 birth_time);
+  list[8] = io_make_output_field("GasDensity", FLOAT, 1, UNIT_CONV_DENSITY,
+                                 sparts, rho_gas);
+}
+
+/**
+ * @brief Initialize the global properties of the stars scheme.
+ *
+ * By default, takes the values provided by the hydro.
+ *
+ * @param sp The #stars_props.
+ * @param phys_const The physical constants in the internal unit system.
+ * @param us The internal unit system.
+ * @param params The parsed parameters.
+ * @param p The already read-in properties of the hydro scheme.
+ */
+INLINE static void stars_props_init(struct stars_props *sp,
+                                    const struct phys_const *phys_const,
+                                    const struct unit_system *us,
+                                    struct swift_params *params,
+                                    const struct hydro_props *p) {
+
+  /* Kernel properties */
+  sp->eta_neighbours = parser_get_opt_param_float(
+      params, "Stars:resolution_eta", p->eta_neighbours);
+
+  /* Tolerance for the smoothing length Newton-Raphson scheme */
+  sp->h_tolerance =
+      parser_get_opt_param_float(params, "Stars:h_tolerance", p->h_tolerance);
+
+  /* Get derived properties */
+  sp->target_neighbours = pow_dimension(sp->eta_neighbours) * kernel_norm;
+  const float delta_eta = sp->eta_neighbours * (1.f + sp->h_tolerance);
+  sp->delta_neighbours =
+      (pow_dimension(delta_eta) - pow_dimension(sp->eta_neighbours)) *
+      kernel_norm;
+
+  /* Number of iterations to converge h */
+  sp->max_smoothing_iterations = parser_get_opt_param_int(
+      params, "Stars:max_ghost_iterations", p->max_smoothing_iterations);
+
+  /* Initialize with solar abundance */
+  // sp->chemistry_data.smoothed_metal_mass_fraction_total =
+
+  /* Time integration properties */
+  const float max_volume_change =
+      parser_get_opt_param_float(params, "Stars:max_volume_change", -1);
+  if (max_volume_change == -1)
+    sp->log_max_h_change = p->log_max_h_change;
+  else
+    sp->log_max_h_change = logf(powf(max_volume_change, hydro_dimension_inv));
+}
+
+/**
+ * @brief Print the global properties of the stars scheme.
+ *
+ * @param sp The #stars_props.
+ */
+INLINE static void stars_props_print(const struct stars_props *sp) {
+
+  /* Now stars */
+  message("Stars kernel: %s with eta=%f (%.2f neighbours).", kernel_name,
+          sp->eta_neighbours, sp->target_neighbours);
+
+  message("Stars relative tolerance in h: %.5f (+/- %.4f neighbours).",
+          sp->h_tolerance, sp->delta_neighbours);
+
+  message(
+      "Stars integration: Max change of volume: %.2f "
+      "(max|dlog(h)/dt|=%f).",
+      pow_dimension(expf(sp->log_max_h_change)), sp->log_max_h_change);
+
+  message("Maximal iterations in ghost task set to %d",
+          sp->max_smoothing_iterations);
+}
+
+#if defined(HAVE_HDF5)
+INLINE static void stars_props_print_snapshot(hid_t h_grpstars,
+                                              const struct stars_props *sp) {
+
+  io_write_attribute_s(h_grpstars, "Kernel function", kernel_name);
+  io_write_attribute_f(h_grpstars, "Kernel target N_ngb",
+                       sp->target_neighbours);
+  io_write_attribute_f(h_grpstars, "Kernel delta N_ngb", sp->delta_neighbours);
+  io_write_attribute_f(h_grpstars, "Kernel eta", sp->eta_neighbours);
+  io_write_attribute_f(h_grpstars, "Smoothing length tolerance",
+                       sp->h_tolerance);
+  io_write_attribute_f(h_grpstars, "Volume log(max(delta h))",
+                       sp->log_max_h_change);
+  io_write_attribute_f(h_grpstars, "Volume max change time-step",
+                       pow_dimension(expf(sp->log_max_h_change)));
+  io_write_attribute_i(h_grpstars, "Max ghost iterations",
+                       sp->max_smoothing_iterations);
+}
+#endif
+
+/**
+ * @brief Write a #stars_props struct to the given FILE as a stream of bytes.
+ *
+ * @param p the struct
+ * @param stream the file stream
+ */
+INLINE static void stars_props_struct_dump(const struct stars_props *p,
+                                           FILE *stream) {
+  restart_write_blocks((void *)p, sizeof(struct stars_props), 1, stream,
+                       "starsprops", "stars props");
+}
+
+/**
+ * @brief Restore a stars_props struct from the given FILE as a stream of
+ * bytes.
+ *
+ * @param p the struct
+ * @param stream the file stream
+ */
+INLINE static void stars_props_struct_restore(const struct stars_props *p,
+                                              FILE *stream) {
+  restart_read_blocks((void *)p, sizeof(struct stars_props), 1, stream, NULL,
+                      "stars props");
+}
+
+#endif /* SWIFT_EAGLE_STARS_IO_H */
diff --git a/src/stars/EAGLE/stars_part.h b/src/stars/EAGLE/stars_part.h
new file mode 100644
index 0000000000000000000000000000000000000000..664b5c0d03dd5b762aab0a0f582f22312b2196c6
--- /dev/null
+++ b/src/stars/EAGLE/stars_part.h
@@ -0,0 +1,150 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *               2018 Folkert Nobels (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_EAGLE_STAR_PART_H
+#define SWIFT_EAGLE_STAR_PART_H
+
+/* Some standard headers. */
+#include <stdlib.h>
+
+/* Read chemistry */
+#include "chemistry_struct.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Particle fields for the star particles.
+ *
+ * All quantities related to gravity are stored in the associated #gpart.
+ */
+struct spart {
+
+  /*! Particle ID. */
+  long long id;
+
+  /*! Pointer to corresponding gravity part. */
+  struct gpart* gpart;
+
+  /*! Particle position. */
+  double x[3];
+
+  /* Offset between current position and position at last tree rebuild. */
+  float x_diff[3];
+
+  /* Offset between current position and position at last tree rebuild. */
+  float x_diff_sort[3];
+
+  /*! Particle velocity. */
+  float v[3];
+
+  /*! Star mass */
+  float mass;
+
+  /*! Initial star mass */
+  float mass_init;
+
+  /*! Particle smoothing length. */
+  float h;
+
+  /*! Density of the gas surrounding the star. */
+  float rho_gas;
+
+  /*! Particle time bin */
+  timebin_t time_bin;
+
+  struct {
+
+    /* Number of neighbours. */
+    float wcount;
+
+    /* Number of neighbours spatial derivative. */
+    float wcount_dh;
+
+  } density;
+
+  struct {
+
+    /* Change in smoothing length over time. */
+    float h_dt;
+
+  } feedback;
+
+  /*! Union for the birth time and birth scale factor */
+  union {
+
+    /*! Birth time */
+    float birth_time;
+
+    /*! Birth scale factor */
+    float birth_scale_factor;
+  };
+
+  /*! Birth density */
+  float birth_density;
+
+  /*! Tracer structure */
+  struct tracers_xpart_data tracers_data;
+
+  /*! Chemistry structure */
+  struct chemistry_part_data chemistry_data;
+
+#ifdef SWIFT_DEBUG_CHECKS
+
+  /* Time of the last drift */
+  integertime_t ti_drift;
+
+  /* Time of the last kick */
+  integertime_t ti_kick;
+
+#endif
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /*! List of interacting particles in the density SELF and PAIR */
+  long long ids_ngbs_density[MAX_NUM_OF_NEIGHBOURS_STARS];
+
+  /*! Number of interactions in the density SELF and PAIR */
+  int num_ngb_density;
+#endif
+
+} SWIFT_STRUCT_ALIGN;
+
+/**
+ * @brief Contains all the constants and parameters of the stars scheme
+ */
+struct stars_props {
+
+  /*! Resolution parameter */
+  float eta_neighbours;
+
+  /*! Target weighted number of neighbours (for info only)*/
+  float target_neighbours;
+
+  /*! Smoothing length tolerance */
+  float h_tolerance;
+
+  /*! Tolerance on neighbour number  (for info only)*/
+  float delta_neighbours;
+
+  /*! Maximal number of iterations to converge h */
+  int max_smoothing_iterations;
+
+  /*! Maximal change of h over one time-step */
+  float log_max_h_change;
+};
+
+#endif /* SWIFT_EAGLE_STAR_PART_H */
diff --git a/src/stars_io.h b/src/stars_io.h
index 18a13ec19163008f1c8e9f64cf544ddf812db655..c2a095c47c8d491fe6cf97d22c367d1e9d4a7fd6 100644
--- a/src/stars_io.h
+++ b/src/stars_io.h
@@ -16,11 +16,21 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_STAR_IO_H
-#define SWIFT_STAR_IO_H
+#ifndef SWIFT_STARS_IO_H
+#define SWIFT_STARS_IO_H
 
+#include "../config.h"
 #include "./const.h"
 
-#include "./stars/Default/star_io.h"
+/* Load the correct star type */
+#if defined(STARS_NONE)
+#include "./stars/Default/stars_io.h"
+#elif defined(STARS_EAGLE)
+#include "./stars/EAGLE/stars_io.h"
+#elif defined(STARS_GEAR)
+#include "./stars/GEAR/stars_io.h"
+#else
+#error "Invalid choice of star model"
+#endif
 
-#endif /* SWIFT_STAR_IO_H */
+#endif /* SWIFT_STARS_IO_H */
diff --git a/src/statistics.c b/src/statistics.c
index bdca6cfb4ef84bb64aa4776bfc600b0727e0d606..8866f345a4d0ccad8d3a50f30f6b07ff7787dbbd 100644
--- a/src/statistics.c
+++ b/src/statistics.c
@@ -21,6 +21,7 @@
 #include "../config.h"
 
 /* Some standard headers. */
+#include <math.h>
 #include <string.h>
 
 /* MPI headers. */
@@ -136,7 +137,7 @@ void stats_collect_part_mapper(void *map_data, int nr_parts, void *extra_data) {
     /* Get the particle */
     const struct part *p = &parts[k];
     const struct xpart *xp = &xparts[k];
-    const struct gpart *gp = (p->gpart != NULL) ? gp = p->gpart : NULL;
+    const struct gpart *gp = p->gpart;
 
     /* Get useful time variables */
     const integertime_t ti_beg =
@@ -166,8 +167,8 @@ void stats_collect_part_mapper(void *map_data, int nr_parts, void *extra_data) {
     hydro_get_drifted_velocities(p, xp, dt_kick_hydro, dt_kick_grav, v);
     const double x[3] = {p->x[0], p->x[1], p->x[2]};
     const float m = hydro_get_mass(p);
-    const float entropy = hydro_get_physical_entropy(p, cosmo);
-    const float u_inter = hydro_get_physical_internal_energy(p, cosmo);
+    const float entropy = hydro_get_drifted_physical_entropy(p, cosmo);
+    const float u_inter = hydro_get_drifted_physical_internal_energy(p, cosmo);
 
     /* Collect mass */
     stats.mass += m;
@@ -386,7 +387,7 @@ MPI_Op statistics_mpi_reduce_op;
 /**
  * @brief MPI reduce operator for #statistics structures.
  */
-void stats_add_MPI(void *in, void *inout, int *len, MPI_Datatype *datatype) {
+void stats_add_mpi(void *in, void *inout, int *len, MPI_Datatype *datatype) {
 
   for (int i = 0; i < *len; ++i)
     stats_add(&((struct statistics *)inout)[0],
@@ -396,7 +397,7 @@ void stats_add_MPI(void *in, void *inout, int *len, MPI_Datatype *datatype) {
 /**
  * @brief Registers MPI #statistics type and reduction function.
  */
-void stats_create_MPI_type(void) {
+void stats_create_mpi_type(void) {
 
   /* This is not the recommended way of doing this.
      One should define the structure field by field
@@ -411,6 +412,6 @@ void stats_create_MPI_type(void) {
   }
 
   /* Create the reduction operation */
-  MPI_Op_create(stats_add_MPI, 1, &statistics_mpi_reduce_op);
+  MPI_Op_create(stats_add_mpi, 1, &statistics_mpi_reduce_op);
 }
 #endif
diff --git a/src/statistics.h b/src/statistics.h
index adc9f5b6a24a093419b7dd644404a68ef736a685..b741eac3d406d767f5652234b9a16d82464cc456 100644
--- a/src/statistics.h
+++ b/src/statistics.h
@@ -76,8 +76,7 @@ void stats_finalize(struct statistics* s);
 extern MPI_Datatype statistics_mpi_type;
 extern MPI_Op statistics_mpi_reduce_op;
 
-void stats_add_MPI(void* in, void* out, int* len, MPI_Datatype* datatype);
-void stats_create_MPI_type(void);
+void stats_create_mpi_type(void);
 #endif
 
 #endif /* SWIFT_STATISTICS_H */
diff --git a/src/swift.h b/src/swift.h
index e10938addb99956c202b3e4dd2b0592b580fa948..e166dde5dd3baed07fb5c081c64ce941d6c6ce6d 100644
--- a/src/swift.h
+++ b/src/swift.h
@@ -38,6 +38,7 @@
 #include "debug.h"
 #include "dump.h"
 #include "engine.h"
+#include "entropy_floor.h"
 #include "error.h"
 #include "gravity.h"
 #include "gravity_derivatives.h"
@@ -46,6 +47,7 @@
 #include "hydro_properties.h"
 #include "lock.h"
 #include "logger.h"
+#include "logger_io.h"
 #include "map.h"
 #include "mesh_gravity.h"
 #include "multipole.h"
@@ -59,13 +61,16 @@
 #include "potential.h"
 #include "profiler.h"
 #include "queue.h"
+#include "random.h"
 #include "restart.h"
 #include "runner.h"
 #include "scheduler.h"
 #include "serial_io.h"
 #include "single_io.h"
-#include "sourceterms.h"
 #include "space.h"
+#include "star_formation.h"
+#include "stars.h"
+#include "stars_io.h"
 #include "task.h"
 #include "threadpool.h"
 #include "timeline.h"
diff --git a/src/swift_velociraptor_part.h b/src/swift_velociraptor_part.h
index 80ee94ba612299dbe8b451cf1ef9d0ee45f8bf53..700842ac5a13e5bee4af15cc0d8726fc668ce421 100644
--- a/src/swift_velociraptor_part.h
+++ b/src/swift_velociraptor_part.h
@@ -19,7 +19,15 @@
 #ifndef SWIFT_VELOCIRAPTOR_PART_H
 #define SWIFT_VELOCIRAPTOR_PART_H
 
-/* SWIFT/VELOCIraptor particle. */
+#include "part_type.h"
+
+/**
+ * @brief SWIFT/VELOCIraptor particle.
+ *
+ * This should match the structure Swift::swift_vel_part
+ * defined in the file NBodylib/src/NBody/SwiftParticle.h
+ * of the VELOCIraptor code.
+ */
 struct swift_vel_part {
 
   /*! Particle ID. */
@@ -40,8 +48,18 @@ struct swift_vel_part {
   /*! Internal energy of gas particle */
   float u;
 
+  /*! Temperature of a gas particle */
+  float T;
+
   /*! Type of the #gpart (DM, gas, star, ...) */
   enum part_type type;
+
+  /*! MPI rank on which this #gpart lives on the SWIFT side. */
+  int task;
+
+  /*! Index of this #gpart in the global array of this rank on the SWIFT
+    side. */
+  int index;
 };
 
 #endif /* SWIFT_VELOCIRAPTOR_PART_H */
diff --git a/src/task.c b/src/task.c
index 10f2ddf5cec885ec23c4f65db5cdea50f0e5097b..34c636b48ed6ff3fefdf1e7847a67ca56ea79c89 100644
--- a/src/task.c
+++ b/src/task.c
@@ -42,26 +42,57 @@
 
 /* Local headers. */
 #include "atomic.h"
+#include "engine.h"
 #include "error.h"
 #include "inline.h"
 #include "lock.h"
 
 /* Task type names. */
-const char *taskID_names[task_type_count] = {
-    "none",       "sort",          "self",
-    "pair",       "sub_self",      "sub_pair",
-    "init_grav",  "init_grav_out", "ghost_in",
-    "ghost",      "ghost_out",     "extra_ghost",
-    "drift_part", "drift_gpart",   "end_force",
-    "kick1",      "kick2",         "timestep",
-    "send",       "recv",          "grav_long_range",
-    "grav_mm",    "grav_down_in",  "grav_down",
-    "grav_mesh",  "cooling",       "sourceterms"};
+const char *taskID_names[task_type_count] = {"none",
+                                             "sort",
+                                             "self",
+                                             "pair",
+                                             "sub_self",
+                                             "sub_pair",
+                                             "init_grav",
+                                             "init_grav_out",
+                                             "ghost_in",
+                                             "ghost",
+                                             "ghost_out",
+                                             "extra_ghost",
+                                             "drift_part",
+                                             "drift_spart",
+                                             "drift_gpart",
+                                             "drift_gpart_out",
+                                             "end_hydro_force",
+                                             "kick1",
+                                             "kick2",
+                                             "timestep",
+                                             "timestep_limiter",
+                                             "send",
+                                             "recv",
+                                             "grav_long_range",
+                                             "grav_mm",
+                                             "grav_down_in",
+                                             "grav_down",
+                                             "grav_mesh",
+                                             "grav_end_force",
+                                             "cooling",
+                                             "star_formation",
+                                             "logger",
+                                             "stars_in",
+                                             "stars_out",
+                                             "stars_ghost_in",
+                                             "stars_ghost",
+                                             "stars_ghost_out",
+                                             "stars_sort"};
 
 /* Sub-task type names. */
 const char *subtaskID_names[task_subtype_count] = {
-    "none", "density", "gradient", "force", "grav",      "external_grav",
-    "tend", "xv",      "rho",      "gpart", "multipole", "spart"};
+    "none",    "density",       "gradient",      "force",
+    "limiter", "grav",          "external_grav", "tend",
+    "xv",      "rho",           "gpart",         "multipole",
+    "spart",   "stars_density", "stars_feedback"};
 
 #ifdef WITH_MPI
 /* MPI communicators for the subtypes. */
@@ -71,46 +102,31 @@ MPI_Comm subtaskMPI_comms[task_subtype_count];
 /**
  * @brief Computes the overlap between the parts array of two given cells.
  *
- * @param ci The first #cell.
- * @param cj The second #cell.
+ * @param TYPE is the type of parts (e.g. #part, #gpart, #spart)
+ * @param ARRAY is the array of this specific type.
+ * @param COUNT is the number of elements in the array.
  */
-__attribute__((always_inline)) INLINE static size_t task_cell_overlap_part(
-    const struct cell *restrict ci, const struct cell *restrict cj) {
-
-  if (ci == NULL || cj == NULL) return 0;
-
-  if (ci->parts <= cj->parts &&
-      ci->parts + ci->count >= cj->parts + cj->count) {
-    return cj->count;
-  } else if (cj->parts <= ci->parts &&
-             cj->parts + cj->count >= ci->parts + ci->count) {
-    return ci->count;
+#define TASK_CELL_OVERLAP(TYPE, ARRAY, COUNT)                               \
+  __attribute__((always_inline))                                            \
+      INLINE static size_t task_cell_overlap_##TYPE(                        \
+          const struct cell *restrict ci, const struct cell *restrict cj) { \
+                                                                            \
+    if (ci == NULL || cj == NULL) return 0;                                 \
+                                                                            \
+    if (ci->ARRAY <= cj->ARRAY &&                                           \
+        ci->ARRAY + ci->COUNT >= cj->ARRAY + cj->COUNT) {                   \
+      return cj->COUNT;                                                     \
+    } else if (cj->ARRAY <= ci->ARRAY &&                                    \
+               cj->ARRAY + cj->COUNT >= ci->ARRAY + ci->COUNT) {            \
+      return ci->COUNT;                                                     \
+    }                                                                       \
+                                                                            \
+    return 0;                                                               \
   }
 
-  return 0;
-}
-
-/**
- * @brief Computes the overlap between the gparts array of two given cells.
- *
- * @param ci The first #cell.
- * @param cj The second #cell.
- */
-__attribute__((always_inline)) INLINE static size_t task_cell_overlap_gpart(
-    const struct cell *restrict ci, const struct cell *restrict cj) {
-
-  if (ci == NULL || cj == NULL) return 0;
-
-  if (ci->gparts <= cj->gparts &&
-      ci->gparts + ci->gcount >= cj->gparts + cj->gcount) {
-    return cj->gcount;
-  } else if (cj->gparts <= ci->gparts &&
-             cj->gparts + cj->gcount >= ci->gparts + ci->gcount) {
-    return ci->gcount;
-  }
-
-  return 0;
-}
+TASK_CELL_OVERLAP(part, hydro.parts, hydro.count);
+TASK_CELL_OVERLAP(gpart, grav.parts, grav.count);
+TASK_CELL_OVERLAP(spart, stars.parts, stars.count);
 
 /**
  * @brief Returns the #task_actions for a given task.
@@ -130,11 +146,21 @@ __attribute__((always_inline)) INLINE static enum task_actions task_acts_on(
     case task_type_sort:
     case task_type_ghost:
     case task_type_extra_ghost:
+    case task_type_timestep_limiter:
     case task_type_cooling:
-    case task_type_sourceterms:
+    case task_type_end_hydro_force:
       return task_action_part;
       break;
 
+    case task_type_star_formation:
+      return task_action_all;
+
+    case task_type_drift_spart:
+    case task_type_stars_ghost:
+    case task_type_stars_sort:
+      return task_action_spart;
+      break;
+
     case task_type_self:
     case task_type_pair:
     case task_type_sub_self:
@@ -144,57 +170,76 @@ __attribute__((always_inline)) INLINE static enum task_actions task_acts_on(
         case task_subtype_density:
         case task_subtype_gradient:
         case task_subtype_force:
+        case task_subtype_limiter:
           return task_action_part;
           break;
 
+        case task_subtype_stars_density:
+        case task_subtype_stars_feedback:
+          return task_action_all;
+          break;
+
         case task_subtype_grav:
         case task_subtype_external_grav:
           return task_action_gpart;
           break;
 
         default:
-          error("Unknow task_action for task");
+#ifdef SWIFT_DEBUG_CHECKS
+          error("Unknown task_action for task %s/%s", taskID_names[t->type],
+                subtaskID_names[t->subtype]);
+#endif
           return task_action_none;
           break;
       }
       break;
 
-    case task_type_end_force:
     case task_type_kick1:
     case task_type_kick2:
+    case task_type_logger:
     case task_type_timestep:
     case task_type_send:
     case task_type_recv:
-      if (t->ci->count > 0 && t->ci->gcount > 0)
+      if (t->ci->hydro.count > 0 && t->ci->grav.count > 0)
         return task_action_all;
-      else if (t->ci->count > 0)
+      else if (t->ci->hydro.count > 0)
         return task_action_part;
-      else if (t->ci->gcount > 0)
+      else if (t->ci->grav.count > 0)
         return task_action_gpart;
-      else
+      else {
+#ifdef SWIFT_DEBUG_CHECKS
         error("Task without particles");
+#endif
+      }
       break;
 
     case task_type_init_grav:
     case task_type_grav_mm:
+    case task_type_grav_long_range:
       return task_action_multipole;
       break;
 
     case task_type_drift_gpart:
     case task_type_grav_down:
+    case task_type_end_grav_force:
     case task_type_grav_mesh:
-    case task_type_grav_long_range:
       return task_action_gpart;
       break;
 
     default:
-      error("Unknown task_action for task");
+#ifdef SWIFT_DEBUG_CHECKS
+      error("Unknown task_action for task %s/%s", taskID_names[t->type],
+            subtaskID_names[t->subtype]);
+#endif
       return task_action_none;
       break;
   }
 
-  /* Silence compiler warnings */
-  error("Unknown task_action for task");
+#ifdef SWIFT_DEBUG_CHECKS
+  error("Unknown task_action for task %s/%s", taskID_names[t->type],
+        subtaskID_names[t->subtype]);
+#endif
+  /* Silence compiler warnings. We should never get here. */
   return task_action_none;
 }
 
@@ -220,19 +265,25 @@ float task_overlap(const struct task *restrict ta,
   const int ta_part = (ta_act == task_action_part || ta_act == task_action_all);
   const int ta_gpart =
       (ta_act == task_action_gpart || ta_act == task_action_all);
+  const int ta_spart =
+      (ta_act == task_action_spart || ta_act == task_action_all);
   const int tb_part = (tb_act == task_action_part || tb_act == task_action_all);
   const int tb_gpart =
       (tb_act == task_action_gpart || tb_act == task_action_all);
+  const int tb_spart =
+      (tb_act == task_action_spart || tb_act == task_action_all);
 
   /* In the case where both tasks act on parts */
   if (ta_part && tb_part) {
 
     /* Compute the union of the cell data. */
     size_t size_union = 0;
-    if (ta->ci != NULL) size_union += ta->ci->count;
-    if (ta->cj != NULL) size_union += ta->cj->count;
-    if (tb->ci != NULL) size_union += tb->ci->count;
-    if (tb->cj != NULL) size_union += tb->cj->count;
+    if (ta->ci != NULL) size_union += ta->ci->hydro.count;
+    if (ta->cj != NULL) size_union += ta->cj->hydro.count;
+    if (tb->ci != NULL) size_union += tb->ci->hydro.count;
+    if (tb->cj != NULL) size_union += tb->cj->hydro.count;
+
+    if (size_union == 0) return 0.f;
 
     /* Compute the intersection of the cell data. */
     const size_t size_intersect = task_cell_overlap_part(ta->ci, tb->ci) +
@@ -248,10 +299,12 @@ float task_overlap(const struct task *restrict ta,
 
     /* Compute the union of the cell data. */
     size_t size_union = 0;
-    if (ta->ci != NULL) size_union += ta->ci->gcount;
-    if (ta->cj != NULL) size_union += ta->cj->gcount;
-    if (tb->ci != NULL) size_union += tb->ci->gcount;
-    if (tb->cj != NULL) size_union += tb->cj->gcount;
+    if (ta->ci != NULL) size_union += ta->ci->grav.count;
+    if (ta->cj != NULL) size_union += ta->cj->grav.count;
+    if (tb->ci != NULL) size_union += tb->ci->grav.count;
+    if (tb->cj != NULL) size_union += tb->cj->grav.count;
+
+    if (size_union == 0) return 0.f;
 
     /* Compute the intersection of the cell data. */
     const size_t size_intersect = task_cell_overlap_gpart(ta->ci, tb->ci) +
@@ -262,6 +315,27 @@ float task_overlap(const struct task *restrict ta,
     return ((float)size_intersect) / (size_union - size_intersect);
   }
 
+  /* In the case where both tasks act on sparts */
+  else if (ta_spart && tb_spart) {
+
+    /* Compute the union of the cell data. */
+    size_t size_union = 0;
+    if (ta->ci != NULL) size_union += ta->ci->stars.count;
+    if (ta->cj != NULL) size_union += ta->cj->stars.count;
+    if (tb->ci != NULL) size_union += tb->ci->stars.count;
+    if (tb->cj != NULL) size_union += tb->cj->stars.count;
+
+    if (size_union == 0) return 0.f;
+
+    /* Compute the intersection of the cell data. */
+    const size_t size_intersect = task_cell_overlap_spart(ta->ci, tb->ci) +
+                                  task_cell_overlap_spart(ta->ci, tb->cj) +
+                                  task_cell_overlap_spart(ta->cj, tb->ci) +
+                                  task_cell_overlap_spart(ta->cj, tb->cj);
+
+    return ((float)size_intersect) / (size_union - size_intersect);
+  }
+
   /* Else, no overlap */
   return 0.f;
 }
@@ -280,9 +354,9 @@ void task_unlock(struct task *t) {
   /* Act based on task type. */
   switch (type) {
 
-    case task_type_end_force:
     case task_type_kick1:
     case task_type_kick2:
+    case task_type_logger:
     case task_type_timestep:
       cell_unlocktree(ci);
       cell_gunlocktree(ci);
@@ -290,19 +364,32 @@ void task_unlock(struct task *t) {
 
     case task_type_drift_part:
     case task_type_sort:
+    case task_type_ghost:
+    case task_type_end_hydro_force:
+    case task_type_timestep_limiter:
       cell_unlocktree(ci);
       break;
 
     case task_type_drift_gpart:
     case task_type_grav_mesh:
+    case task_type_end_grav_force:
       cell_gunlocktree(ci);
       break;
 
+    case task_type_stars_sort:
+      cell_sunlocktree(ci);
+      break;
+
     case task_type_self:
     case task_type_sub_self:
       if (subtype == task_subtype_grav) {
         cell_gunlocktree(ci);
         cell_munlocktree(ci);
+      } else if (subtype == task_subtype_stars_density) {
+        cell_sunlocktree(ci);
+      } else if (subtype == task_subtype_stars_feedback) {
+        cell_sunlocktree(ci);
+        cell_unlocktree(ci);
       } else {
         cell_unlocktree(ci);
       }
@@ -315,6 +402,14 @@ void task_unlock(struct task *t) {
         cell_gunlocktree(cj);
         cell_munlocktree(ci);
         cell_munlocktree(cj);
+      } else if (subtype == task_subtype_stars_density) {
+        cell_sunlocktree(ci);
+        cell_sunlocktree(cj);
+      } else if (subtype == task_subtype_stars_feedback) {
+        cell_sunlocktree(ci);
+        cell_sunlocktree(cj);
+        cell_unlocktree(ci);
+        cell_unlocktree(cj);
       } else {
         cell_unlocktree(ci);
         cell_unlocktree(cj);
@@ -335,6 +430,12 @@ void task_unlock(struct task *t) {
       cell_munlocktree(cj);
       break;
 
+    case task_type_star_formation:
+      cell_unlocktree(ci);
+      cell_sunlocktree(ci);
+      cell_gunlocktree(ci);
+      break;
+
     default:
       break;
   }
@@ -366,8 +467,10 @@ int task_lock(struct task *t) {
         char buff[MPI_MAX_ERROR_STRING];
         int len;
         MPI_Error_string(err, buff, &len);
-        error("Failed to test request on send/recv task (tag=%i, %s).",
-              t->flags, buff);
+        error(
+            "Failed to test request on send/recv task (type=%s/%s tag=%lld, "
+            "%s).",
+            taskID_names[t->type], subtaskID_names[t->subtype], t->flags, buff);
       }
       return res;
 #else
@@ -375,11 +478,11 @@ int task_lock(struct task *t) {
 #endif
       break;
 
-    case task_type_end_force:
     case task_type_kick1:
     case task_type_kick2:
+    case task_type_logger:
     case task_type_timestep:
-      if (ci->hold || ci->ghold) return 0;
+      if (ci->hydro.hold || ci->grav.phold) return 0;
       if (cell_locktree(ci) != 0) return 0;
       if (cell_glocktree(ci) != 0) {
         cell_unlocktree(ci);
@@ -389,13 +492,22 @@ int task_lock(struct task *t) {
 
     case task_type_drift_part:
     case task_type_sort:
-      if (ci->hold) return 0;
+    case task_type_ghost:
+    case task_type_end_hydro_force:
+    case task_type_timestep_limiter:
+      if (ci->hydro.hold) return 0;
       if (cell_locktree(ci) != 0) return 0;
       break;
 
+    case task_type_stars_sort:
+      if (ci->stars.hold) return 0;
+      if (cell_slocktree(ci) != 0) return 0;
+      break;
+
     case task_type_drift_gpart:
+    case task_type_end_grav_force:
     case task_type_grav_mesh:
-      if (ci->ghold) return 0;
+      if (ci->grav.phold) return 0;
       if (cell_glocktree(ci) != 0) return 0;
       break;
 
@@ -403,14 +515,26 @@ int task_lock(struct task *t) {
     case task_type_sub_self:
       if (subtype == task_subtype_grav) {
         /* Lock the gparts and the m-pole */
-        if (ci->ghold || ci->mhold) return 0;
+        if (ci->grav.phold || ci->grav.mhold) return 0;
         if (cell_glocktree(ci) != 0)
           return 0;
         else if (cell_mlocktree(ci) != 0) {
           cell_gunlocktree(ci);
           return 0;
         }
-      } else {
+      } else if (subtype == task_subtype_stars_density) {
+        if (ci->stars.hold) return 0;
+        if (cell_slocktree(ci) != 0) return 0;
+      } else if (subtype == task_subtype_stars_feedback) {
+        if (ci->stars.hold) return 0;
+        if (ci->hydro.hold) return 0;
+        if (cell_slocktree(ci) != 0) return 0;
+        if (cell_locktree(ci) != 0) {
+          cell_sunlocktree(ci);
+          return 0;
+        }
+      } else { /* subtype == hydro */
+        if (ci->hydro.hold) return 0;
         if (cell_locktree(ci) != 0) return 0;
       }
       break;
@@ -419,7 +543,7 @@ int task_lock(struct task *t) {
     case task_type_sub_pair:
       if (subtype == task_subtype_grav) {
         /* Lock the gparts and the m-pole in both cells */
-        if (ci->ghold || cj->ghold) return 0;
+        if (ci->grav.phold || cj->grav.phold) return 0;
         if (cell_glocktree(ci) != 0) return 0;
         if (cell_glocktree(cj) != 0) {
           cell_gunlocktree(ci);
@@ -434,9 +558,36 @@ int task_lock(struct task *t) {
           cell_munlocktree(ci);
           return 0;
         }
-      } else {
+      } else if (subtype == task_subtype_stars_density) {
+        if (ci->stars.hold || cj->stars.hold) return 0;
+        if (cell_slocktree(ci) != 0) return 0;
+        if (cell_slocktree(cj) != 0) {
+          cell_sunlocktree(ci);
+          return 0;
+        }
+      } else if (subtype == task_subtype_stars_feedback) {
+        /* Lock the stars and the gas particles in both cells */
+        if (ci->stars.hold || cj->stars.hold) return 0;
+        if (ci->hydro.hold || cj->hydro.hold) return 0;
+        if (cell_slocktree(ci) != 0) return 0;
+        if (cell_slocktree(cj) != 0) {
+          cell_sunlocktree(ci);
+          return 0;
+        }
+        if (cell_locktree(ci) != 0) {
+          cell_sunlocktree(ci);
+          cell_sunlocktree(cj);
+          return 0;
+        }
+        if (cell_locktree(cj) != 0) {
+          cell_sunlocktree(ci);
+          cell_sunlocktree(cj);
+          cell_unlocktree(ci);
+          return 0;
+        }
+      } else { /* subtype == hydro */
         /* Lock the parts in both cells */
-        if (ci->hold || cj->hold) return 0;
+        if (ci->hydro.hold || cj->hydro.hold) return 0;
         if (cell_locktree(ci) != 0) return 0;
         if (cell_locktree(cj) != 0) {
           cell_unlocktree(ci);
@@ -447,7 +598,7 @@ int task_lock(struct task *t) {
 
     case task_type_grav_down:
       /* Lock the gparts and the m-poles */
-      if (ci->ghold || ci->mhold) return 0;
+      if (ci->grav.phold || ci->grav.mhold) return 0;
       if (cell_glocktree(ci) != 0)
         return 0;
       else if (cell_mlocktree(ci) != 0) {
@@ -458,18 +609,33 @@ int task_lock(struct task *t) {
 
     case task_type_grav_long_range:
       /* Lock the m-poles */
-      if (ci->mhold) return 0;
+      if (ci->grav.mhold) return 0;
       if (cell_mlocktree(ci) != 0) return 0;
       break;
 
     case task_type_grav_mm:
       /* Lock both m-poles */
-      if (ci->mhold || cj->mhold) return 0;
+      if (ci->grav.mhold || cj->grav.mhold) return 0;
       if (cell_mlocktree(ci) != 0) return 0;
       if (cell_mlocktree(cj) != 0) {
         cell_munlocktree(ci);
         return 0;
       }
+      break;
+
+    case task_type_star_formation:
+      /* Lock the gas, gravity and star particles */
+      if (ci->hydro.hold || ci->stars.hold || ci->grav.phold) return 0;
+      if (cell_locktree(ci) != 0) return 0;
+      if (cell_slocktree(ci) != 0) {
+        cell_unlocktree(ci);
+        return 0;
+      }
+      if (cell_glocktree(ci) != 0) {
+        cell_unlocktree(ci);
+        cell_sunlocktree(ci);
+        return 0;
+      }
 
     default:
       break;
@@ -491,6 +657,81 @@ void task_print(const struct task *t) {
           t->nr_unlock_tasks, t->skip);
 }
 
+/**
+ * @brief Get the group name of a task.
+ *
+ * This is used to group tasks with similar actions in the task dependency
+ * graph.
+ *
+ * @param type The #task type.
+ * @param subtype The #task subtype.
+ * @param cluster (return) The group name (should be allocated)
+ */
+void task_get_group_name(int type, int subtype, char *cluster) {
+
+  if (type == task_type_grav_long_range || type == task_type_grav_mm ||
+      type == task_type_grav_mesh) {
+
+    strcpy(cluster, "Gravity");
+    return;
+  }
+
+  switch (subtype) {
+    case task_subtype_density:
+      strcpy(cluster, "Density");
+      break;
+    case task_subtype_gradient:
+      if (type == task_type_send || type == task_type_recv) {
+        strcpy(cluster, "None");
+      } else {
+        strcpy(cluster, "Gradient");
+      }
+      break;
+    case task_subtype_force:
+      strcpy(cluster, "Force");
+      break;
+    case task_subtype_grav:
+      strcpy(cluster, "Gravity");
+      break;
+    case task_subtype_limiter:
+      strcpy(cluster, "Timestep_limiter");
+      break;
+    case task_subtype_stars_density:
+      strcpy(cluster, "StarsDensity");
+      break;
+    case task_subtype_stars_feedback:
+      strcpy(cluster, "StarsFeedback");
+      break;
+    default:
+      strcpy(cluster, "None");
+      break;
+  }
+}
+
+/**
+ * @brief Generate the full name of a #task.
+ *
+ * @param type The #task type.
+ * @param subtype The #task subtype.
+ * @param name (return) The formatted string
+ */
+void task_get_full_name(int type, int subtype, char *name) {
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Check input */
+  if (type >= task_type_count) error("Unknown task type %i", type);
+
+  if (subtype >= task_subtype_count)
+    error("Unknown task subtype %i with type %s", subtype, taskID_names[type]);
+#endif
+
+  /* Full task name */
+  if (subtype == task_subtype_none)
+    sprintf(name, "%s", taskID_names[type]);
+  else
+    sprintf(name, "%s_%s", taskID_names[type], subtaskID_names[subtype]);
+}
+
 #ifdef WITH_MPI
 /**
  * @brief Create global communicators for each of the subtasks.
@@ -501,3 +742,243 @@ void task_create_mpi_comms(void) {
   }
 }
 #endif
+
+/**
+ * @brief dump all the tasks of all the known engines into a file for
+ * postprocessing.
+ *
+ * Dumps the information to a file "thread_info-stepn.dat" where n is the
+ * given step value, or "thread_info_MPI-stepn.dat", if we are running
+ * under MPI. Note if running under MPI all the ranks are dumped into this
+ * one file, which has an additional field to identify the rank.
+ *
+ * @param e the #engine
+ * @param step the current step.
+ */
+void task_dump_all(struct engine *e, int step) {
+
+#ifdef SWIFT_DEBUG_TASKS
+
+  /* Need this to convert ticks to seconds. */
+  unsigned long long cpufreq = clocks_get_cpufreq();
+
+#ifdef WITH_MPI
+  /* Make sure output file is empty, only on one rank. */
+  char dumpfile[35];
+  snprintf(dumpfile, sizeof(dumpfile), "thread_info_MPI-step%d.dat", step);
+  FILE *file_thread;
+  if (engine_rank == 0) {
+    file_thread = fopen(dumpfile, "w");
+    fclose(file_thread);
+  }
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  for (int i = 0; i < e->nr_nodes; i++) {
+
+    /* Rank 0 decides the index of the writing node, this happens
+     * one-by-one. */
+    int kk = i;
+    MPI_Bcast(&kk, 1, MPI_INT, 0, MPI_COMM_WORLD);
+
+    if (i == engine_rank) {
+
+      /* Open file and position at end. */
+      file_thread = fopen(dumpfile, "a");
+
+      /* Add some information to help with the plots and conversion of ticks to
+       * seconds. */
+      fprintf(file_thread, " %03d 0 0 0 0 %lld %lld %lld %lld %lld 0 0 %lld\n",
+              engine_rank, (long long int)e->tic_step,
+              (long long int)e->toc_step, e->updates, e->g_updates,
+              e->s_updates, cpufreq);
+      int count = 0;
+      for (int l = 0; l < e->sched.nr_tasks; l++) {
+        if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0) {
+          fprintf(
+              file_thread, " %03i %i %i %i %i %lli %lli %i %i %i %i %lli %i\n",
+              engine_rank, e->sched.tasks[l].rid, e->sched.tasks[l].type,
+              e->sched.tasks[l].subtype, (e->sched.tasks[l].cj == NULL),
+              (long long int)e->sched.tasks[l].tic,
+              (long long int)e->sched.tasks[l].toc,
+              (e->sched.tasks[l].ci != NULL) ? e->sched.tasks[l].ci->hydro.count
+                                             : 0,
+              (e->sched.tasks[l].cj != NULL) ? e->sched.tasks[l].cj->hydro.count
+                                             : 0,
+              (e->sched.tasks[l].ci != NULL) ? e->sched.tasks[l].ci->grav.count
+                                             : 0,
+              (e->sched.tasks[l].cj != NULL) ? e->sched.tasks[l].cj->grav.count
+                                             : 0,
+              e->sched.tasks[l].flags, e->sched.tasks[l].sid);
+        }
+        count++;
+      }
+      fclose(file_thread);
+    }
+
+    /* And we wait for all to synchronize. */
+    MPI_Barrier(MPI_COMM_WORLD);
+  }
+
+#else
+  /* Non-MPI, so just a single engine's worth of tasks to dump. */
+  char dumpfile[32];
+  snprintf(dumpfile, sizeof(dumpfile), "thread_info-step%d.dat", step);
+  FILE *file_thread;
+  file_thread = fopen(dumpfile, "w");
+
+  /* Add some information to help with the plots and conversion of ticks to
+   * seconds. */
+  fprintf(file_thread, " %d %d %d %d %lld %lld %lld %lld %lld %d %lld\n", -2,
+          -1, -1, 1, (unsigned long long)e->tic_step,
+          (unsigned long long)e->toc_step, e->updates, e->g_updates,
+          e->s_updates, 0, cpufreq);
+  for (int l = 0; l < e->sched.nr_tasks; l++) {
+    if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0) {
+      fprintf(
+          file_thread, " %i %i %i %i %lli %lli %i %i %i %i %i\n",
+          e->sched.tasks[l].rid, e->sched.tasks[l].type,
+          e->sched.tasks[l].subtype, (e->sched.tasks[l].cj == NULL),
+          (unsigned long long)e->sched.tasks[l].tic,
+          (unsigned long long)e->sched.tasks[l].toc,
+          (e->sched.tasks[l].ci == NULL) ? 0
+                                         : e->sched.tasks[l].ci->hydro.count,
+          (e->sched.tasks[l].cj == NULL) ? 0
+                                         : e->sched.tasks[l].cj->hydro.count,
+          (e->sched.tasks[l].ci == NULL) ? 0 : e->sched.tasks[l].ci->grav.count,
+          (e->sched.tasks[l].cj == NULL) ? 0 : e->sched.tasks[l].cj->grav.count,
+          e->sched.tasks[l].sid);
+    }
+  }
+  fclose(file_thread);
+#endif  // WITH_MPI
+#endif  // SWIFT_DEBUG_TASKS
+}
+
+/**
+ * @brief Generate simple statistics about the times used by the tasks of
+ *        all the engines and write these into two format, a human readable
+ *        version for debugging and one intended for inclusion as the fixed
+ *        costs for repartitioning.
+ *
+ * Note that when running under MPI all the tasks can be summed into this single
+ * file. In the fuller, human readable file, the statistics included are the
+ * number of task of each type/subtype followed by the minimum, maximum, mean
+ * and total time, in millisec and then the fixed costs value.
+ *
+ * If header is set, only the fixed costs value is written into the output
+ * file in a format that is suitable for inclusion in SWIFT (as
+ * partition_fixed_costs.h).
+ *
+ * @param dumpfile name of the file for the output.
+ * @param e the #engine
+ * @param header whether to write a header include file.
+ * @param allranks do the statistics over all ranks, if not just the current
+ *                 one, only used if header is false.
+ */
+void task_dump_stats(const char *dumpfile, struct engine *e, int header,
+                     int allranks) {
+
+  /* Need arrays for sum, min and max across all types and subtypes. */
+  double sum[task_type_count][task_subtype_count];
+  double min[task_type_count][task_subtype_count];
+  double max[task_type_count][task_subtype_count];
+  int count[task_type_count][task_subtype_count];
+
+  for (int j = 0; j < task_type_count; j++) {
+    for (int k = 0; k < task_subtype_count; k++) {
+      sum[j][k] = 0.0;
+      count[j][k] = 0;
+      min[j][k] = DBL_MAX;
+      max[j][k] = 0.0;
+    }
+  }
+
+  double total[1] = {0.0};
+  for (int l = 0; l < e->sched.nr_tasks; l++) {
+    int type = e->sched.tasks[l].type;
+
+    /* Skip implicit tasks, tasks that didn't run and MPI send/recv as these
+     * are not interesting (or meaningfully measured). */
+    if (!e->sched.tasks[l].implicit && e->sched.tasks[l].toc != 0 &&
+        type != task_type_send && type != task_type_recv) {
+      int subtype = e->sched.tasks[l].subtype;
+
+      double dt = e->sched.tasks[l].toc - e->sched.tasks[l].tic;
+      sum[type][subtype] += dt;
+      count[type][subtype] += 1;
+      if (dt < min[type][subtype]) {
+        min[type][subtype] = dt;
+      }
+      if (dt > max[type][subtype]) {
+        max[type][subtype] = dt;
+      }
+      total[0] += dt;
+    }
+  }
+
+#ifdef WITH_MPI
+  if (allranks || header) {
+    /* Get these from all ranks for output from rank 0. Could wrap these into a
+     * single operation. */
+    size_t size = task_type_count * task_subtype_count;
+    int res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : sum), sum, size,
+                         MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task sums");
+
+    res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : count), count, size,
+                     MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task counts");
+
+    res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : min), min, size,
+                     MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task minima");
+
+    res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : max), max, size,
+                     MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task maxima");
+
+    res = MPI_Reduce((engine_rank == 0 ? MPI_IN_PLACE : total), total, 1,
+                     MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+    if (res != MPI_SUCCESS) mpi_error(res, "Failed to reduce task total time");
+  }
+
+  if (!allranks || (engine_rank == 0 && (allranks || header))) {
+#endif
+
+    FILE *dfile = fopen(dumpfile, "w");
+    if (header) {
+      fprintf(dfile, "/* use as src/partition_fixed_costs.h */\n");
+      fprintf(dfile, "#define HAVE_FIXED_COSTS 1\n");
+    } else {
+      fprintf(dfile, "# task ntasks min max sum mean percent fixed_cost\n");
+    }
+
+    for (int j = 0; j < task_type_count; j++) {
+      const char *taskID = taskID_names[j];
+      for (int k = 0; k < task_subtype_count; k++) {
+        if (sum[j][k] > 0.0) {
+          double mean = sum[j][k] / (double)count[j][k];
+          double perc = 100.0 * sum[j][k] / total[0];
+
+          /* Fixed cost is in .1ns as we want to compare between runs in
+           * some absolute units. */
+          int fixed_cost = (int)(clocks_from_ticks(mean) * 10000.f);
+          if (header) {
+            fprintf(dfile, "repartition_costs[%d][%d] = %10d; /* %s/%s */\n", j,
+                    k, fixed_cost, taskID, subtaskID_names[k]);
+          } else {
+            fprintf(dfile,
+                    "%15s/%-10s %10d %14.4f %14.4f %14.4f %14.4f %14.4f %10d\n",
+                    taskID, subtaskID_names[k], count[j][k],
+                    clocks_from_ticks(min[j][k]), clocks_from_ticks(max[j][k]),
+                    clocks_from_ticks(sum[j][k]), clocks_from_ticks(mean), perc,
+                    fixed_cost);
+          }
+        }
+      }
+    }
+    fclose(dfile);
+#ifdef WITH_MPI
+  }
+#endif
+}
diff --git a/src/task.h b/src/task.h
index 58ea3a8cbb93b38b47ab7b6a243c3ee6c85d40b7..704d1a5ef80f1208bce69d0acf7625fb36fa19e1 100644
--- a/src/task.h
+++ b/src/task.h
@@ -52,11 +52,14 @@ enum task_types {
   task_type_ghost_out, /* Implicit */
   task_type_extra_ghost,
   task_type_drift_part,
+  task_type_drift_spart,
   task_type_drift_gpart,
-  task_type_end_force,
+  task_type_drift_gpart_out, /* Implicit */
+  task_type_end_hydro_force,
   task_type_kick1,
   task_type_kick2,
   task_type_timestep,
+  task_type_timestep_limiter,
   task_type_send,
   task_type_recv,
   task_type_grav_long_range,
@@ -64,8 +67,16 @@ enum task_types {
   task_type_grav_down_in, /* Implicit */
   task_type_grav_down,
   task_type_grav_mesh,
+  task_type_end_grav_force,
   task_type_cooling,
-  task_type_sourceterms,
+  task_type_star_formation,
+  task_type_logger,
+  task_type_stars_in,       /* Implicit */
+  task_type_stars_out,      /* Implicit */
+  task_type_stars_ghost_in, /* Implicit */
+  task_type_stars_ghost,
+  task_type_stars_ghost_out, /* Implicit */
+  task_type_stars_sort,
   task_type_count
 } __attribute__((packed));
 
@@ -77,6 +88,7 @@ enum task_subtypes {
   task_subtype_density,
   task_subtype_gradient,
   task_subtype_force,
+  task_subtype_limiter,
   task_subtype_grav,
   task_subtype_external_grav,
   task_subtype_tend,
@@ -85,6 +97,8 @@ enum task_subtypes {
   task_subtype_gpart,
   task_subtype_multipole,
   task_subtype_spart,
+  task_subtype_stars_density,
+  task_subtype_stars_feedback,
   task_subtype_count
 } __attribute__((packed));
 
@@ -95,6 +109,7 @@ enum task_actions {
   task_action_none,
   task_action_part,
   task_action_gpart,
+  task_action_spart,
   task_action_all,
   task_action_multipole,
   task_action_count
@@ -128,6 +143,9 @@ struct task {
   /*! List of tasks unlocked by this one */
   struct task **unlock_tasks;
 
+  /*! Flags used to carry additional information (e.g. sort directions) */
+  long long flags;
+
 #ifdef WITH_MPI
 
   /*! Buffer for this task's communications */
@@ -138,20 +156,12 @@ struct task {
 
 #endif
 
-  /*! Flags used to carry additional information (e.g. sort directions) */
-  int flags;
-
   /*! Rank of a task in the order */
   int rank;
 
   /*! Weight of the task */
   float weight;
 
-#if defined(WITH_MPI) && defined(HAVE_METIS)
-  /*! Individual cost estimate for this task. */
-  float cost;
-#endif
-
   /*! Number of tasks unlocked by this one */
   short int nr_unlock_tasks;
 
@@ -176,10 +186,10 @@ struct task {
 
   /*! Information about the direction of the pair task */
   short int sid;
+#endif
 
   /*! Start and end time of this task */
   ticks tic, toc;
-#endif
 
 #ifdef SWIFT_DEBUG_CHECKS
   /* When was this task last run? */
@@ -194,6 +204,12 @@ float task_overlap(const struct task *ta, const struct task *tb);
 int task_lock(struct task *t);
 void task_do_rewait(struct task *t);
 void task_print(const struct task *t);
+void task_dump_all(struct engine *e, int step);
+void task_dump_stats(const char *dumpfile, struct engine *e, int header,
+                     int allranks);
+void task_get_full_name(int type, int subtype, char *name);
+void task_get_group_name(int type, int subtype, char *cluster);
+
 #ifdef WITH_MPI
 void task_create_mpi_comms(void);
 #endif
diff --git a/src/timeline.h b/src/timeline.h
index 4078a904c3e9205b8ea6ae7090534bd3d3d0784f..a2bb8da6e8c5c92288541c206b453af141bf094e 100644
--- a/src/timeline.h
+++ b/src/timeline.h
@@ -27,9 +27,10 @@
 #include "intrinsics.h"
 
 #include <math.h>
+#include <stdint.h>
 
 typedef long long integertime_t;
-typedef char timebin_t;
+typedef int8_t timebin_t;
 
 /*! The number of time bins */
 #define num_time_bins 56
@@ -40,6 +41,9 @@ typedef char timebin_t;
 /*! Fictious time-bin to hold inhibited particles */
 #define time_bin_inhibited (num_time_bins + 2)
 
+/*! Fictitious time-bin to hold particles not yet created */
+#define time_bin_not_created (num_time_bins + 3)
+
 /*! Fictitious time-bin for particles not awaken */
 #define time_bin_not_awake (0)
 
@@ -62,24 +66,30 @@ get_integer_timestep(timebin_t bin) {
  * @brief Returns the time bin corresponding to a given time_step size.
  *
  * Assumes that integertime_t maps to an unsigned long long.
+ * Given our definitions, this is log_2 of the time_step rounded down minus one.
+ *
+ * We use a fast (but exact for any non-zero value) logarithm in base 2
+ * calculation based on the bit representation of the number:
+ * log_2(x) = (number of bits in the type) - (number of leading 0-bits in x) - 1
  */
 __attribute__((const)) static INLINE timebin_t
 get_time_bin(integertime_t time_step) {
 
   /* ((int) log_2(time_step)) - 1 */
-  return (timebin_t)(62 - intrinsics_clzll(time_step));
+  return (timebin_t)((8 * sizeof(integertime_t) - 2) -
+                     intrinsics_clzll((unsigned long long)time_step));
 }
 
 /**
  * @brief Returns the physical time interval corresponding to a time bin.
  *
  * @param bin The time bin of interest.
- * @param timeBase the minimal time-step size of the simulation.
+ * @param time_base the minimal time-step size of the simulation.
  */
 __attribute__((const)) static INLINE double get_timestep(timebin_t bin,
-                                                         double timeBase) {
+                                                         double time_base) {
 
-  return get_integer_timestep(bin) * timeBase;
+  return get_integer_timestep(bin) * time_base;
 }
 
 /**
@@ -142,7 +152,7 @@ __attribute__((const)) static INLINE timebin_t
 get_min_active_bin(integertime_t ti_current, integertime_t ti_old) {
 
   const timebin_t min_bin = get_max_active_bin(ti_current - ti_old);
-  return (ti_old > 0) ? min_bin : (min_bin - 1);
+  return min_bin;
 }
 
 #endif /* SWIFT_TIMELINE_H */
diff --git a/src/timers.c b/src/timers.c
index e3fbfdb01249e98e46d2c60d45bd98adb0a34241..9ede0320e49c70b2488cd5ceb3e4b6965659aa74 100644
--- a/src/timers.c
+++ b/src/timers.c
@@ -61,7 +61,6 @@ const char* timers_names[timer_count] = {
     "dograv_mesh",
     "dograv_top_level",
     "dograv_long_range",
-    "dosource",
     "dosub_self_density",
     "dosub_self_gradient",
     "dosub_self_force",
@@ -80,12 +79,16 @@ const char* timers_names[timer_count] = {
     "dorecv_gpart",
     "dorecv_spart",
     "do_cooling",
+    "do_star_formation",
     "gettask",
     "qget",
     "qsteal",
     "locktree",
     "runners",
     "step",
+    "do_stars_ghost",
+    "logger",
+    "do_stars_sort",
 };
 
 /* File to store the timers */
diff --git a/src/timers.h b/src/timers.h
index 91d26c1c0d781f725b4c55a7ed3b6cfe956651df..3a2a939339e6d08b43836d4f5ca213af0822c2b2 100644
--- a/src/timers.h
+++ b/src/timers.h
@@ -62,7 +62,6 @@ enum {
   timer_dograv_mesh,
   timer_dograv_top_level,
   timer_dograv_long_range,
-  timer_dosource,
   timer_dosub_self_density,
   timer_dosub_self_gradient,
   timer_dosub_self_force,
@@ -81,12 +80,16 @@ enum {
   timer_dorecv_gpart,
   timer_dorecv_spart,
   timer_do_cooling,
+  timer_do_star_formation,
   timer_gettask,
   timer_qget,
   timer_qsteal,
   timer_locktree,
   timer_runners,
   timer_step,
+  timer_dostars_ghost,
+  timer_logger,
+  timer_do_stars_sort,
   timer_count,
 };
 
diff --git a/src/timestep.h b/src/timestep.h
index d065df4c444cb880a74688be97245c285a391817..b98ce06d5e69a2e2b5cb8503322c025dc69f92c7 100644
--- a/src/timestep.h
+++ b/src/timestep.h
@@ -58,6 +58,11 @@ make_integer_timestep(float new_dt, timebin_t old_bin, integertime_t ti_current,
   if (new_dti > current_dti) {
     if ((max_nr_timesteps - ti_end) % new_dti > 0) new_dti = current_dti;
   }
+
+#ifdef SWIFT_DEBUG_CHECKS
+  if (new_dti == 0) error("Computed an integer time-step of size 0");
+#endif
+
   return new_dti;
 }
 
@@ -121,8 +126,9 @@ __attribute__((always_inline)) INLINE static integertime_t get_part_timestep(
   /* Compute the next timestep (cooling condition) */
   float new_dt_cooling = FLT_MAX;
   if (e->policy & engine_policy_cooling)
-    new_dt_cooling = cooling_timestep(e->cooling_func, e->physical_constants,
-                                      e->cosmology, e->internal_units, p);
+    new_dt_cooling =
+        cooling_timestep(e->cooling_func, e->physical_constants, e->cosmology,
+                         e->internal_units, e->hydro_properties, p, xp);
 
   /* Compute the next timestep (gravity condition) */
   float new_dt_grav = FLT_MAX, new_dt_self_grav = FLT_MAX,
@@ -181,7 +187,7 @@ __attribute__((always_inline)) INLINE static integertime_t get_spart_timestep(
     const struct spart *restrict sp, const struct engine *restrict e) {
 
   /* Stellar time-step */
-  float new_dt_star = star_compute_timestep(sp);
+  float new_dt_stars = stars_compute_timestep(sp);
 
   /* Gravity time-step */
   float new_dt_self = FLT_MAX, new_dt_ext = FLT_MAX;
@@ -195,8 +201,14 @@ __attribute__((always_inline)) INLINE static integertime_t get_spart_timestep(
     new_dt_self = gravity_compute_timestep_self(
         sp->gpart, a_hydro, e->gravity_properties, e->cosmology);
 
+  /* Limit change in smoothing length */
+  const float dt_h_change = (sp->feedback.h_dt != 0.0f)
+                                ? fabsf(e->stars_properties->log_max_h_change *
+                                        sp->h / sp->feedback.h_dt)
+                                : FLT_MAX;
+
   /* Take the minimum of all */
-  float new_dt = min3(new_dt_star, new_dt_self, new_dt_ext);
+  float new_dt = min4(new_dt_stars, new_dt_self, new_dt_ext, dt_h_change);
 
   /* Apply the maximal displacement constraint (FLT_MAX  if non-cosmological)*/
   new_dt = min(new_dt, e->dt_max_RMS_displacement);
@@ -206,9 +218,10 @@ __attribute__((always_inline)) INLINE static integertime_t get_spart_timestep(
 
   /* Limit timestep within the allowed range */
   new_dt = min(new_dt, e->dt_max);
-  if (new_dt < e->dt_min)
+  if (new_dt < e->dt_min) {
     error("spart (id=%lld) wants a time-step (%e) below dt_min (%e)", sp->id,
           new_dt, e->dt_min);
+  }
 
   /* Convert to integer time */
   const integertime_t new_dti = make_integer_timestep(
diff --git a/src/timestep_limiter.h b/src/timestep_limiter.h
new file mode 100644
index 0000000000000000000000000000000000000000..db5e044132370c273f66aefd8e2d116642b28a73
--- /dev/null
+++ b/src/timestep_limiter.h
@@ -0,0 +1,143 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TIMESTEP_LIMITER_H
+#define SWIFT_TIMESTEP_LIMITER_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/**
+ * @brief Wakes up a particle by rewinding its kick1 back in time and applying
+ * a new one such that the particle becomes active again in the next time-step.
+ *
+ * @param p The #part to update.
+ * @param xp Its #xpart companion.
+ * @param e The #engine (to extract time-line information).
+ */
+__attribute__((always_inline)) INLINE static integertime_t timestep_limit_part(
+    struct part *restrict p, struct xpart *restrict xp,
+    const struct engine *e) {
+
+  const struct cosmology *cosmo = e->cosmology;
+  const int with_cosmology = e->policy & engine_policy_cosmology;
+  const double time_base = e->time_base;
+
+  integertime_t old_ti_beg, old_ti_end;
+  timebin_t old_time_bin;
+
+  /* Let's see when this particle started and used to end */
+  if (p->wakeup == time_bin_awake) {
+
+    /* Normal case */
+    old_ti_beg = get_integer_time_begin(e->ti_current, p->time_bin);
+    old_ti_end = get_integer_time_end(e->ti_current, p->time_bin);
+    old_time_bin = p->time_bin;
+  } else {
+
+    /* Particle that was limited in the previous step already */
+    old_ti_beg = get_integer_time_begin(e->ti_current, -p->wakeup);
+    old_ti_end = get_integer_time_end(e->ti_current, p->time_bin);
+    old_time_bin = -p->wakeup;
+  }
+
+  const integertime_t old_dti = old_ti_end - old_ti_beg;
+
+  /* The new fake time-step the particle will be on */
+  const integertime_t new_fake_ti_step =
+      get_integer_timestep(e->min_active_bin);
+
+  /* The actual time-step size this particle will use */
+  const integertime_t new_ti_beg = old_ti_beg;
+  const integertime_t new_ti_end = e->ti_current + new_fake_ti_step;
+  const integertime_t new_dti = new_ti_end - new_ti_beg;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Some basic safety checks */
+  if (old_ti_beg >= e->ti_current)
+    error(
+        "Incorrect value for old time-step beginning ti_current=%lld, "
+        "old_ti_beg=%lld",
+        e->ti_current, old_ti_beg);
+
+  if (old_ti_end <= e->ti_current)
+    error(
+        "Incorrect value for old time-step end ti_current=%lld, "
+        "old_ti_end=%lld",
+        e->ti_current, old_ti_end);
+
+  if (new_ti_end > old_ti_end) error("New end of time-step after the old one");
+
+  if (new_dti > old_dti) error("New time-step larger than old one");
+
+  if (new_fake_ti_step == 0) error("Wakeup call too early");
+#endif
+
+  double dt_kick_grav = 0., dt_kick_hydro = 0., dt_kick_therm = 0.,
+         dt_kick_corr = 0.;
+
+  /* Now we need to reverse the kick1... (the dt are negative here) */
+  if (with_cosmology) {
+    dt_kick_hydro = -cosmology_get_hydro_kick_factor(cosmo, old_ti_beg,
+                                                     old_ti_beg + old_dti / 2);
+    dt_kick_grav = -cosmology_get_grav_kick_factor(cosmo, old_ti_beg,
+                                                   old_ti_beg + old_dti / 2);
+    dt_kick_therm = -cosmology_get_therm_kick_factor(cosmo, old_ti_beg,
+                                                     old_ti_beg + old_dti / 2);
+    dt_kick_corr = -cosmology_get_corr_kick_factor(cosmo, old_ti_beg,
+                                                   old_ti_beg + old_dti / 2);
+  } else {
+    dt_kick_hydro = -(old_dti / 2) * time_base;
+    dt_kick_grav = -(old_dti / 2) * time_base;
+    dt_kick_therm = -(old_dti / 2) * time_base;
+    dt_kick_corr = -(old_dti / 2) * time_base;
+  }
+  kick_part(p, xp, dt_kick_hydro, dt_kick_grav, dt_kick_therm, dt_kick_corr,
+            e->cosmology, e->hydro_properties, e->entropy_floor,
+            old_ti_beg + old_dti / 2, old_ti_beg);
+
+  /* ...and apply the new one (dt is positive) */
+  if (with_cosmology) {
+    dt_kick_hydro = cosmology_get_hydro_kick_factor(cosmo, new_ti_beg,
+                                                    new_ti_beg + new_dti / 2);
+    dt_kick_grav = cosmology_get_grav_kick_factor(cosmo, new_ti_beg,
+                                                  new_ti_beg + new_dti / 2);
+    dt_kick_therm = cosmology_get_therm_kick_factor(cosmo, new_ti_beg,
+                                                    new_ti_beg + new_dti / 2);
+    dt_kick_corr = cosmology_get_corr_kick_factor(cosmo, new_ti_beg,
+                                                  new_ti_beg + new_dti / 2);
+  } else {
+    dt_kick_hydro = (new_dti / 2) * time_base;
+    dt_kick_grav = (new_dti / 2) * time_base;
+    dt_kick_therm = (new_dti / 2) * time_base;
+    dt_kick_corr = (new_dti / 2) * time_base;
+  }
+  kick_part(p, xp, dt_kick_hydro, dt_kick_grav, dt_kick_therm, dt_kick_corr,
+            e->cosmology, e->hydro_properties, e->entropy_floor, new_ti_beg,
+            new_ti_beg + new_dti / 2);
+
+  /* Remember the old time-bin */
+  p->wakeup = old_time_bin;
+
+  /* Update the time bin of this particle */
+  p->time_bin = e->min_active_bin;
+
+  return new_fake_ti_step;
+}
+
+#endif /* SWIFT_TIMESTEP_LIMITER_H */
diff --git a/src/tools.c b/src/tools.c
index 9c0df6012737872eef8d97521b3a7532ceb42720..43ac0177daef171850ea325f9fa23770fb82ae13 100644
--- a/src/tools.c
+++ b/src/tools.c
@@ -45,6 +45,8 @@
 #include "part.h"
 #include "periodic.h"
 #include "runner.h"
+#include "star_formation_iact.h"
+#include "stars.h"
 
 /**
  *  Factorize a given integer, attempts to keep larger pair of factors.
@@ -194,70 +196,151 @@ void pairs_all_density(struct runner *r, struct cell *ci, struct cell *cj) {
   const float H = cosmo->H;
 
   /* Implements a double-for loop and checks every interaction */
-  for (int i = 0; i < ci->count; ++i) {
+  for (int i = 0; i < ci->hydro.count; ++i) {
 
-    pi = &ci->parts[i];
+    pi = &ci->hydro.parts[i];
     hi = pi->h;
     hig2 = hi * hi * kernel_gamma2;
 
     /* Skip inactive particles. */
     if (!part_is_active(pi, e)) continue;
 
-    for (int j = 0; j < cj->count; ++j) {
+    for (int j = 0; j < cj->hydro.count; ++j) {
 
-      pj = &cj->parts[j];
+      pj = &cj->hydro.parts[j];
 
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dx[k] = ci->parts[i].x[k] - cj->parts[j].x[k];
+        dx[k] = ci->hydro.parts[i].x[k] - cj->hydro.parts[j].x[k];
         dx[k] = nearest(dx[k], dim[k]);
         r2 += dx[k] * dx[k];
       }
 
       /* Hit or miss? */
-      if (r2 < hig2) {
+      if (r2 < hig2 && !part_is_inhibited(pj, e)) {
 
         /* Interact */
         runner_iact_nonsym_density(r2, dx, hi, pj->h, pi, pj, a, H);
         runner_iact_nonsym_chemistry(r2, dx, hi, pj->h, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hi, pj->h, pi, pj, a, H);
       }
     }
   }
 
   /* Reverse double-for loop and checks every interaction */
-  for (int j = 0; j < cj->count; ++j) {
+  for (int j = 0; j < cj->hydro.count; ++j) {
 
-    pj = &cj->parts[j];
+    pj = &cj->hydro.parts[j];
     hj = pj->h;
     hjg2 = hj * hj * kernel_gamma2;
 
     /* Skip inactive particles. */
     if (!part_is_active(pj, e)) continue;
 
-    for (int i = 0; i < ci->count; ++i) {
+    for (int i = 0; i < ci->hydro.count; ++i) {
 
-      pi = &ci->parts[i];
+      pi = &ci->hydro.parts[i];
 
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dx[k] = cj->parts[j].x[k] - ci->parts[i].x[k];
+        dx[k] = cj->hydro.parts[j].x[k] - ci->hydro.parts[i].x[k];
         dx[k] = nearest(dx[k], dim[k]);
         r2 += dx[k] * dx[k];
       }
 
       /* Hit or miss? */
-      if (r2 < hjg2) {
+      if (r2 < hjg2 && !part_is_inhibited(pi, e)) {
 
         /* Interact */
         runner_iact_nonsym_density(r2, dx, hj, pi->h, pj, pi, a, H);
         runner_iact_nonsym_chemistry(r2, dx, hj, pi->h, pj, pi, a, H);
+        runner_iact_nonsym_star_formation(r2, dx, hj, pi->h, pj, pi, a, H);
       }
     }
   }
 }
 
+#ifdef EXTRA_HYDRO_LOOP
+void pairs_all_gradient(struct runner *r, struct cell *ci, struct cell *cj) {
+
+  float r2, hi, hj, hig2, hjg2, dx[3];
+  struct part *pi, *pj;
+  const double dim[3] = {r->e->s->dim[0], r->e->s->dim[1], r->e->s->dim[2]};
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Implements a double-for loop and checks every interaction */
+  for (int i = 0; i < ci->hydro.count; ++i) {
+
+    pi = &ci->hydro.parts[i];
+    hi = pi->h;
+    hig2 = hi * hi * kernel_gamma2;
+
+    /* Skip inactive particles. */
+    if (!part_is_active(pi, e)) continue;
+
+    for (int j = 0; j < cj->hydro.count; ++j) {
+
+      pj = &cj->hydro.parts[j];
+      hj = pj->h;
+      hjg2 = hj * hj * kernel_gamma2;
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dx[k] = ci->hydro.parts[i].x[k] - cj->hydro.parts[j].x[k];
+        dx[k] = nearest(dx[k], dim[k]);
+        r2 += dx[k] * dx[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 < hig2 && !part_is_inhibited(pj, e)) {
+
+        /* Interact */
+        runner_iact_nonsym_gradient(r2, dx, hi, hj, pi, pj, a, H);
+      }
+    }
+  }
+
+  /* Reverse double-for loop and checks every interaction */
+  for (int j = 0; j < cj->hydro.count; ++j) {
+
+    pj = &cj->hydro.parts[j];
+    hj = pj->h;
+    hjg2 = hj * hj * kernel_gamma2;
+
+    /* Skip inactive particles. */
+    if (!part_is_active(pj, e)) continue;
+
+    for (int i = 0; i < ci->hydro.count; ++i) {
+
+      pi = &ci->hydro.parts[i];
+      hi = pi->h;
+      hig2 = hi * hi * kernel_gamma2;
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dx[k] = cj->hydro.parts[j].x[k] - ci->hydro.parts[i].x[k];
+        dx[k] = nearest(dx[k], dim[k]);
+        r2 += dx[k] * dx[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 < hjg2 && !part_is_inhibited(pi, e)) {
+
+        /* Interact */
+        runner_iact_nonsym_gradient(r2, dx, hj, pi->h, pj, pi, a, H);
+      }
+    }
+  }
+}
+#endif /* EXTRA_HYDRO_LOOP */
+
 void pairs_all_force(struct runner *r, struct cell *ci, struct cell *cj) {
 
   float r2, hi, hj, hig2, hjg2, dx[3];
@@ -269,25 +352,25 @@ void pairs_all_force(struct runner *r, struct cell *ci, struct cell *cj) {
   const float H = cosmo->H;
 
   /* Implements a double-for loop and checks every interaction */
-  for (int i = 0; i < ci->count; ++i) {
+  for (int i = 0; i < ci->hydro.count; ++i) {
 
-    pi = &ci->parts[i];
+    pi = &ci->hydro.parts[i];
     hi = pi->h;
     hig2 = hi * hi * kernel_gamma2;
 
     /* Skip inactive particles. */
     if (!part_is_active(pi, e)) continue;
 
-    for (int j = 0; j < cj->count; ++j) {
+    for (int j = 0; j < cj->hydro.count; ++j) {
 
-      pj = &cj->parts[j];
+      pj = &cj->hydro.parts[j];
       hj = pj->h;
       hjg2 = hj * hj * kernel_gamma2;
 
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dx[k] = ci->parts[i].x[k] - cj->parts[j].x[k];
+        dx[k] = ci->hydro.parts[i].x[k] - cj->hydro.parts[j].x[k];
         dx[k] = nearest(dx[k], dim[k]);
         r2 += dx[k] * dx[k];
       }
@@ -302,25 +385,25 @@ void pairs_all_force(struct runner *r, struct cell *ci, struct cell *cj) {
   }
 
   /* Reverse double-for loop and checks every interaction */
-  for (int j = 0; j < cj->count; ++j) {
+  for (int j = 0; j < cj->hydro.count; ++j) {
 
-    pj = &cj->parts[j];
+    pj = &cj->hydro.parts[j];
     hj = pj->h;
     hjg2 = hj * hj * kernel_gamma2;
 
     /* Skip inactive particles. */
     if (!part_is_active(pj, e)) continue;
 
-    for (int i = 0; i < ci->count; ++i) {
+    for (int i = 0; i < ci->hydro.count; ++i) {
 
-      pi = &ci->parts[i];
+      pi = &ci->hydro.parts[i];
       hi = pi->h;
       hig2 = hi * hi * kernel_gamma2;
 
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dx[k] = cj->parts[j].x[k] - ci->parts[i].x[k];
+        dx[k] = cj->hydro.parts[j].x[k] - ci->hydro.parts[i].x[k];
         dx[k] = nearest(dx[k], dim[k]);
         r2 += dx[k] * dx[k];
       }
@@ -335,6 +418,77 @@ void pairs_all_force(struct runner *r, struct cell *ci, struct cell *cj) {
   }
 }
 
+void pairs_all_stars_density(struct runner *r, struct cell *ci,
+                             struct cell *cj) {
+
+  float r2, dx[3];
+  const double dim[3] = {r->e->s->dim[0], r->e->s->dim[1], r->e->s->dim[2]};
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Implements a double-for loop and checks every interaction */
+  for (int i = 0; i < ci->stars.count; ++i) {
+    struct spart *spi = &ci->stars.parts[i];
+
+    float hi = spi->h;
+    float hig2 = hi * hi * kernel_gamma2;
+
+    /* Skip inactive particles. */
+    if (!spart_is_active(spi, e)) continue;
+
+    for (int j = 0; j < cj->hydro.count; ++j) {
+
+      struct part *pj = &cj->hydro.parts[j];
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dx[k] = spi->x[k] - pj->x[k];
+        dx[k] = nearest(dx[k], dim[k]);
+        r2 += dx[k] * dx[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 < hig2) {
+        /* Interact */
+        runner_iact_nonsym_stars_density(r2, dx, hi, pj->h, spi, pj, a, H);
+      }
+    }
+  }
+
+  /* Reverse double-for loop and checks every interaction */
+  for (int j = 0; j < cj->stars.count; ++j) {
+
+    struct spart *spj = &cj->stars.parts[j];
+    float hj = spj->h;
+    float hjg2 = hj * hj * kernel_gamma2;
+
+    /* Skip inactive particles. */
+    if (!spart_is_active(spj, e)) continue;
+
+    for (int i = 0; i < ci->hydro.count; ++i) {
+
+      struct part *pi = &ci->hydro.parts[i];
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dx[k] = spj->x[k] - pi->x[k];
+        dx[k] = nearest(dx[k], dim[k]);
+        r2 += dx[k] * dx[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 < hjg2) {
+        /* Interact */
+        runner_iact_nonsym_stars_density(r2, dx, hj, pi->h, spj, pi, a, H);
+      }
+    }
+  }
+}
+
 void self_all_density(struct runner *r, struct cell *ci) {
   float r2, hi, hj, hig2, hjg2, dxi[3];  //, dxj[3];
   struct part *pi, *pj;
@@ -344,15 +498,15 @@ void self_all_density(struct runner *r, struct cell *ci) {
   const float H = cosmo->H;
 
   /* Implements a double-for loop and checks every interaction */
-  for (int i = 0; i < ci->count; ++i) {
+  for (int i = 0; i < ci->hydro.count; ++i) {
 
-    pi = &ci->parts[i];
+    pi = &ci->hydro.parts[i];
     hi = pi->h;
     hig2 = hi * hi * kernel_gamma2;
 
-    for (int j = i + 1; j < ci->count; ++j) {
+    for (int j = i + 1; j < ci->hydro.count; ++j) {
 
-      pj = &ci->parts[j];
+      pj = &ci->hydro.parts[j];
       hj = pj->h;
       hjg2 = hj * hj * kernel_gamma2;
 
@@ -361,20 +515,21 @@ void self_all_density(struct runner *r, struct cell *ci) {
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dxi[k] = ci->parts[i].x[k] - ci->parts[j].x[k];
+        dxi[k] = ci->hydro.parts[i].x[k] - ci->hydro.parts[j].x[k];
         r2 += dxi[k] * dxi[k];
       }
 
       /* Hit or miss? */
-      if (r2 < hig2 && part_is_active(pi, e)) {
+      if (r2 < hig2 && part_is_active(pi, e) && !part_is_inhibited(pj, e)) {
 
         /* Interact */
         runner_iact_nonsym_density(r2, dxi, hi, hj, pi, pj, a, H);
         runner_iact_nonsym_chemistry(r2, dxi, hi, hj, pi, pj, a, H);
+        runner_iact_nonsym_star_formation(r2, dxi, hi, hj, pi, pj, a, H);
       }
 
       /* Hit or miss? */
-      if (r2 < hjg2 && part_is_active(pj, e)) {
+      if (r2 < hjg2 && part_is_active(pj, e) && !part_is_inhibited(pi, e)) {
 
         dxi[0] = -dxi[0];
         dxi[1] = -dxi[1];
@@ -383,10 +538,64 @@ void self_all_density(struct runner *r, struct cell *ci) {
         /* Interact */
         runner_iact_nonsym_density(r2, dxi, hj, hi, pj, pi, a, H);
         runner_iact_nonsym_chemistry(r2, dxi, hj, hi, pj, pi, a, H);
+        runner_iact_nonsym_star_formation(r2, dxi, hj, hi, pj, pi, a, H);
+      }
+    }
+  }
+}
+
+#ifdef EXTRA_HYDRO_LOOP
+void self_all_gradient(struct runner *r, struct cell *ci) {
+  float r2, hi, hj, hig2, hjg2, dxi[3];  //, dxj[3];
+  struct part *pi, *pj;
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Implements a double-for loop and checks every interaction */
+  for (int i = 0; i < ci->hydro.count; ++i) {
+
+    pi = &ci->hydro.parts[i];
+    hi = pi->h;
+    hig2 = hi * hi * kernel_gamma2;
+
+    for (int j = i + 1; j < ci->hydro.count; ++j) {
+
+      pj = &ci->hydro.parts[j];
+      hj = pj->h;
+      hjg2 = hj * hj * kernel_gamma2;
+
+      if (pi == pj) continue;
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dxi[k] = ci->hydro.parts[i].x[k] - ci->hydro.parts[j].x[k];
+        r2 += dxi[k] * dxi[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 < hig2 && part_is_active(pi, e) && !part_is_inhibited(pj, e)) {
+
+        /* Interact */
+        runner_iact_nonsym_gradient(r2, dxi, hi, hj, pi, pj, a, H);
+      }
+
+      /* Hit or miss? */
+      if (r2 < hjg2 && part_is_active(pj, e) && !part_is_inhibited(pi, e)) {
+
+        dxi[0] = -dxi[0];
+        dxi[1] = -dxi[1];
+        dxi[2] = -dxi[2];
+
+        /* Interact */
+        runner_iact_nonsym_gradient(r2, dxi, hj, hi, pj, pi, a, H);
       }
     }
   }
 }
+#endif /* EXTRA_HYDRO_LOOP */
 
 void self_all_force(struct runner *r, struct cell *ci) {
   float r2, hi, hj, hig2, hjg2, dxi[3];  //, dxj[3];
@@ -397,15 +606,15 @@ void self_all_force(struct runner *r, struct cell *ci) {
   const float H = cosmo->H;
 
   /* Implements a double-for loop and checks every interaction */
-  for (int i = 0; i < ci->count; ++i) {
+  for (int i = 0; i < ci->hydro.count; ++i) {
 
-    pi = &ci->parts[i];
+    pi = &ci->hydro.parts[i];
     hi = pi->h;
     hig2 = hi * hi * kernel_gamma2;
 
-    for (int j = i + 1; j < ci->count; ++j) {
+    for (int j = i + 1; j < ci->hydro.count; ++j) {
 
-      pj = &ci->parts[j];
+      pj = &ci->hydro.parts[j];
       hj = pj->h;
       hjg2 = hj * hj * kernel_gamma2;
 
@@ -414,7 +623,7 @@ void self_all_force(struct runner *r, struct cell *ci) {
       /* Pairwise distance */
       r2 = 0.0f;
       for (int k = 0; k < 3; k++) {
-        dxi[k] = ci->parts[i].x[k] - ci->parts[j].x[k];
+        dxi[k] = ci->hydro.parts[i].x[k] - ci->hydro.parts[j].x[k];
         r2 += dxi[k] * dxi[k];
       }
 
@@ -428,6 +637,45 @@ void self_all_force(struct runner *r, struct cell *ci) {
   }
 }
 
+void self_all_stars_density(struct runner *r, struct cell *ci) {
+  float r2, hi, hj, hig2, dxi[3];
+  struct spart *spi;
+  struct part *pj;
+  const struct engine *e = r->e;
+  const struct cosmology *cosmo = e->cosmology;
+  const float a = cosmo->a;
+  const float H = cosmo->H;
+
+  /* Implements a double-for loop and checks every interaction */
+  for (int i = 0; i < ci->stars.count; ++i) {
+
+    spi = &ci->stars.parts[i];
+    hi = spi->h;
+    hig2 = hi * hi * kernel_gamma2;
+
+    if (!spart_is_active(spi, e)) continue;
+
+    for (int j = 0; j < ci->hydro.count; ++j) {
+
+      pj = &ci->hydro.parts[j];
+      hj = pj->h;
+
+      /* Pairwise distance */
+      r2 = 0.0f;
+      for (int k = 0; k < 3; k++) {
+        dxi[k] = spi->x[k] - pj->x[k];
+        r2 += dxi[k] * dxi[k];
+      }
+
+      /* Hit or miss? */
+      if (r2 > 0.f && r2 < hig2) {
+        /* Interact */
+        runner_iact_nonsym_stars_density(r2, dxi, hi, hj, spi, pj, a, H);
+      }
+    }
+  }
+}
+
 /**
  * @brief Compute the force on a single particle brute-force.
  */
@@ -544,6 +792,23 @@ void shuffle_particles(struct part *parts, const int count) {
   }
 }
 
+/**
+ * @brief Randomly shuffle an array of sparticles.
+ */
+void shuffle_sparticles(struct spart *sparts, const int scount) {
+  if (scount > 1) {
+    for (int i = 0; i < scount - 1; i++) {
+      int j = i + random_uniform(0., (double)(scount - 1 - i));
+
+      struct spart sparticle = sparts[j];
+
+      sparts[j] = sparts[i];
+
+      sparts[i] = sparticle;
+    }
+  }
+}
+
 /**
  * @brief Compares two values based on their relative difference: |a - b|/|a +
  * b|
@@ -586,7 +851,7 @@ int compare_values(double a, double b, double threshold, double *absDiff,
  *
  * @return 1 if difference found, 0 otherwise
  */
-int compare_particles(struct part a, struct part b, double threshold) {
+int compare_particles(struct part *a, struct part *b, double threshold) {
 
 #ifdef GADGET2_SPH
 
@@ -594,117 +859,117 @@ int compare_particles(struct part a, struct part b, double threshold) {
   double absDiff = 0.0, absSum = 0.0, relDiff = 0.0;
 
   for (int k = 0; k < 3; k++) {
-    if (compare_values(a.x[k], b.x[k], threshold, &absDiff, &absSum,
+    if (compare_values(a->x[k], b->x[k], threshold, &absDiff, &absSum,
                        &relDiff)) {
       message(
           "Relative difference (%e) larger than tolerance (%e) for x[%d] of "
           "particle %lld.",
-          relDiff, threshold, k, a.id);
-      message("a = %e, b = %e", a.x[k], b.x[k]);
+          relDiff, threshold, k, a->id);
+      message("a = %e, b = %e", a->x[k], b->x[k]);
       result = 1;
     }
   }
   for (int k = 0; k < 3; k++) {
-    if (compare_values(a.v[k], b.v[k], threshold, &absDiff, &absSum,
+    if (compare_values(a->v[k], b->v[k], threshold, &absDiff, &absSum,
                        &relDiff)) {
       message(
           "Relative difference (%e) larger than tolerance (%e) for v[%d] of "
           "particle %lld.",
-          relDiff, threshold, k, a.id);
-      message("a = %e, b = %e", a.v[k], b.v[k]);
+          relDiff, threshold, k, a->id);
+      message("a = %e, b = %e", a->v[k], b->v[k]);
       result = 1;
     }
   }
   for (int k = 0; k < 3; k++) {
-    if (compare_values(a.a_hydro[k], b.a_hydro[k], threshold, &absDiff, &absSum,
-                       &relDiff)) {
+    if (compare_values(a->a_hydro[k], b->a_hydro[k], threshold, &absDiff,
+                       &absSum, &relDiff)) {
       message(
           "Relative difference (%e) larger than tolerance (%e) for a_hydro[%d] "
           "of particle %lld.",
-          relDiff, threshold, k, a.id);
-      message("a = %e, b = %e", a.a_hydro[k], b.a_hydro[k]);
+          relDiff, threshold, k, a->id);
+      message("a = %e, b = %e", a->a_hydro[k], b->a_hydro[k]);
       result = 1;
     }
   }
-  if (compare_values(a.rho, b.rho, threshold, &absDiff, &absSum, &relDiff)) {
+  if (compare_values(a->rho, b->rho, threshold, &absDiff, &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for rho of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.rho, b.rho);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->rho, b->rho);
     result = 1;
   }
-  if (compare_values(a.density.rho_dh, b.density.rho_dh, threshold, &absDiff,
+  if (compare_values(a->density.rho_dh, b->density.rho_dh, threshold, &absDiff,
                      &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for rho_dh of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.density.rho_dh, b.density.rho_dh);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->density.rho_dh, b->density.rho_dh);
     result = 1;
   }
-  if (compare_values(a.density.wcount, b.density.wcount, threshold, &absDiff,
+  if (compare_values(a->density.wcount, b->density.wcount, threshold, &absDiff,
                      &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for wcount of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.density.wcount, b.density.wcount);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->density.wcount, b->density.wcount);
     result = 1;
   }
-  if (compare_values(a.density.wcount_dh, b.density.wcount_dh, threshold,
+  if (compare_values(a->density.wcount_dh, b->density.wcount_dh, threshold,
                      &absDiff, &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for wcount_dh of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.density.wcount_dh, b.density.wcount_dh);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->density.wcount_dh, b->density.wcount_dh);
     result = 1;
   }
-  if (compare_values(a.force.h_dt, b.force.h_dt, threshold, &absDiff, &absSum,
+  if (compare_values(a->force.h_dt, b->force.h_dt, threshold, &absDiff, &absSum,
                      &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for h_dt of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.force.h_dt, b.force.h_dt);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->force.h_dt, b->force.h_dt);
     result = 1;
   }
-  if (compare_values(a.force.v_sig, b.force.v_sig, threshold, &absDiff, &absSum,
-                     &relDiff)) {
+  if (compare_values(a->force.v_sig, b->force.v_sig, threshold, &absDiff,
+                     &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for v_sig of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.force.v_sig, b.force.v_sig);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->force.v_sig, b->force.v_sig);
     result = 1;
   }
-  if (compare_values(a.entropy_dt, b.entropy_dt, threshold, &absDiff, &absSum,
+  if (compare_values(a->entropy_dt, b->entropy_dt, threshold, &absDiff, &absSum,
                      &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for entropy_dt of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.entropy_dt, b.entropy_dt);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->entropy_dt, b->entropy_dt);
     result = 1;
   }
-  if (compare_values(a.density.div_v, b.density.div_v, threshold, &absDiff,
+  if (compare_values(a->density.div_v, b->density.div_v, threshold, &absDiff,
                      &absSum, &relDiff)) {
     message(
         "Relative difference (%e) larger than tolerance (%e) for div_v of "
         "particle %lld.",
-        relDiff, threshold, a.id);
-    message("a = %e, b = %e", a.density.div_v, b.density.div_v);
+        relDiff, threshold, a->id);
+    message("a = %e, b = %e", a->density.div_v, b->density.div_v);
     result = 1;
   }
   for (int k = 0; k < 3; k++) {
-    if (compare_values(a.density.rot_v[k], b.density.rot_v[k], threshold,
+    if (compare_values(a->density.rot_v[k], b->density.rot_v[k], threshold,
                        &absDiff, &absSum, &relDiff)) {
       message(
           "Relative difference (%e) larger than tolerance (%e) for rot_v[%d] "
           "of particle %lld.",
-          relDiff, threshold, k, a.id);
-      message("a = %e, b = %e", a.density.rot_v[k], b.density.rot_v[k]);
+          relDiff, threshold, k, a->id);
+      message("a = %e, b = %e", a->density.rot_v[k], b->density.rot_v[k]);
       result = 1;
     }
   }
diff --git a/src/tools.h b/src/tools.h
index 25d024679174eabbe89908c0254651e4bbc69e15..a34904bcbb7af1ce15408a376f957f0f72cd327c 100644
--- a/src/tools.h
+++ b/src/tools.h
@@ -38,19 +38,25 @@ void pairs_single_density(double *dim, long long int pid,
 
 void pairs_all_density(struct runner *r, struct cell *ci, struct cell *cj);
 void self_all_density(struct runner *r, struct cell *ci);
+void pairs_all_gradient(struct runner *r, struct cell *ci, struct cell *cj);
+void self_all_gradient(struct runner *r, struct cell *ci);
 void pairs_all_force(struct runner *r, struct cell *ci, struct cell *cj);
 void self_all_force(struct runner *r, struct cell *ci);
+void pairs_all_stars_density(struct runner *r, struct cell *ci,
+                             struct cell *cj);
+void self_all_stars_density(struct runner *r, struct cell *ci);
 
 void pairs_n2(double *dim, struct part *restrict parts, int N, int periodic);
 
 double random_uniform(double a, double b);
 void shuffle_particles(struct part *parts, const int count);
+void shuffle_sparticles(struct spart *sparts, const int scount);
 void gravity_n2(struct gpart *gparts, const int gcount,
                 const struct phys_const *constants,
                 const struct gravity_props *gravity_properties, float rlr);
 int compare_values(double a, double b, double threshold, double *absDiff,
                    double *absSum, double *relDiff);
-int compare_particles(struct part a, struct part b, double threshold);
+int compare_particles(struct part *a, struct part *b, double threshold);
 
 long get_maxrss(void);
 
diff --git a/src/stars/Default/star_part.h b/src/tracers.h
similarity index 52%
rename from src/stars/Default/star_part.h
rename to src/tracers.h
index 68dd4869c257e35b3be7dc21f36e6dcdb725dc17..888d30af1172e1b8b639f8826b68067874a4f63a 100644
--- a/src/stars/Default/star_part.h
+++ b/src/tracers.h
@@ -1,6 +1,6 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published
@@ -16,47 +16,24 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_DEFAULT_STAR_PART_H
-#define SWIFT_DEFAULT_STAR_PART_H
-
-/* Some standard headers. */
-#include <stdlib.h>
+#ifndef SWIFT_TRACERS_H
+#define SWIFT_TRACERS_H
 
 /**
- * @brief Particle fields for the star particles.
- *
- * All quantities related to gravity are stored in the associate #gpart.
+ * @file src/tracers.h
+ * @brief Branches between the different particle data tracers
  */
-struct spart {
-
-  /*! Particle ID. */
-  long long id;
-
-  /*! Pointer to corresponding gravity part. */
-  struct gpart* gpart;
-
-  /*! Particle position. */
-  double x[3];
-
-  /*! Particle velocity. */
-  float v[3];
 
-  /*! Star mass */
-  float mass;
-
-  /*! Particle time bin */
-  timebin_t time_bin;
-
-#ifdef SWIFT_DEBUG_CHECKS
-
-  /* Time of the last drift */
-  integertime_t ti_drift;
-
-  /* Time of the last kick */
-  integertime_t ti_kick;
+/* Config parameters. */
+#include "../config.h"
 
+/* Import the right tracers definition */
+#if defined(TRACERS_NONE)
+#include "./tracers/none/tracers.h"
+#elif defined(TRACERS_EAGLE)
+#include "./tracers/EAGLE/tracers.h"
+#else
+#error "Invalid choice of tracers."
 #endif
 
-} SWIFT_STRUCT_ALIGN;
-
-#endif /* SWIFT_DEFAULT_STAR_PART_H */
+#endif /* SWIFT_TRACERS_H */
diff --git a/src/tracers/EAGLE/tracers.h b/src/tracers/EAGLE/tracers.h
new file mode 100644
index 0000000000000000000000000000000000000000..706a8ad9916bec287ad7aad4853b063d44cc37da
--- /dev/null
+++ b/src/tracers/EAGLE/tracers.h
@@ -0,0 +1,140 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_EAGLE_H
+#define SWIFT_TRACERS_EAGLE_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "cooling.h"
+#include "part.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Update the particle tracers just after it has been initialised at the
+ * start of a step.
+ *
+ * Nothing to do here in the EAGLE model.
+ *
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param with_cosmology Are we running a cosmological simulation?
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param time The current time.
+ */
+static INLINE void tracers_after_init(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {}
+
+/**
+ * @brief Update the particle tracers just after it has been drifted.
+ *
+ * Nothing to do here in the EAGLE model.
+ *
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param with_cosmology Are we running a cosmological simulation?
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param time The current time.
+ */
+static INLINE void tracers_after_drift(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {}
+
+/**
+ * @brief Update the particle tracers just after its time-step has been
+ * computed.
+ *
+ * In EAGLE we record the highest temperature reached.
+ *
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param with_cosmology Are we running a cosmological simulation?
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param time The current time.
+ */
+static INLINE void tracers_after_timestep(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {
+
+  /* Current temperature */
+  const float temperature = cooling_get_temperature(phys_const, hydro_props, us,
+                                                    cosmo, cooling, p, xp);
+
+  /* New record? */
+  if (temperature > xp->tracers_data.maximum_temperature) {
+
+    xp->tracers_data.maximum_temperature = temperature;
+
+    if (with_cosmology) {
+      xp->tracers_data.maximum_temperature_scale_factor = cosmo->a;
+    } else {
+      xp->tracers_data.maximum_temperature_time = time;
+    }
+  }
+}
+
+/**
+ * @brief Initialise the tracers data at the start of a
+ * simulation.
+ *
+ * Set the maximal temperature to a valid initial state
+ *
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ */
+static INLINE void tracers_first_init_xpart(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const struct cosmology *cosmo,
+    const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling) {
+
+  xp->tracers_data.maximum_temperature = -1.f;
+  xp->tracers_data.maximum_temperature_time = -1.f;
+}
+
+#endif /* SWIFT_TRACERS_EAGLE_H */
diff --git a/src/tracers/EAGLE/tracers_io.h b/src/tracers/EAGLE/tracers_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..038cc1c8d3f92c2105d5b5c3ead958f60486ce9f
--- /dev/null
+++ b/src/tracers/EAGLE/tracers_io.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_EAGLE_IO_H
+#define SWIFT_TRACERS_EAGLE_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "io_properties.h"
+#include "tracers.h"
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of tracers to the file.
+ *
+ * @param h_grp The HDF5 group in which to write
+ */
+__attribute__((always_inline)) INLINE static void tracers_write_flavour(
+    hid_t h_grp) {
+
+  io_write_attribute_s(h_grp, "Tracers", "EAGLE");
+}
+#endif
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended data particle array.
+ * @param list The list of i/o properties to write.
+ * @param with_cosmology Are we running with cosmology switched on?
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int tracers_write_particles(
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
+    const int with_cosmology) {
+
+  list[0] = io_make_output_field("Maximal Temperature", FLOAT, 1,
+                                 UNIT_CONV_TEMPERATURE, xparts,
+                                 tracers_data.maximum_temperature);
+
+  if (with_cosmology) {
+    list[1] = io_make_output_field(
+        "Maximal Temperature scale-factor", FLOAT, 1, UNIT_CONV_NO_UNITS,
+        xparts, tracers_data.maximum_temperature_scale_factor);
+
+  } else {
+
+    list[1] = io_make_output_field("Maximal Temperature time", FLOAT, 1,
+                                   UNIT_CONV_NO_UNITS, xparts,
+                                   tracers_data.maximum_temperature_time);
+  }
+
+  return 2;
+}
+
+__attribute__((always_inline)) INLINE static int tracers_write_sparticles(
+    const struct spart* sparts, struct io_props* list,
+    const int with_cosmology) {
+
+  list[0] = io_make_output_field("Maximal Temperature", FLOAT, 1,
+                                 UNIT_CONV_TEMPERATURE, sparts,
+                                 tracers_data.maximum_temperature);
+
+  if (with_cosmology) {
+    list[1] = io_make_output_field(
+        "Maximal Temperature scale-factor", FLOAT, 1, UNIT_CONV_NO_UNITS,
+        sparts, tracers_data.maximum_temperature_scale_factor);
+
+  } else {
+
+    list[1] = io_make_output_field("Maximal Temperature time", FLOAT, 1,
+                                   UNIT_CONV_NO_UNITS, sparts,
+                                   tracers_data.maximum_temperature_time);
+  }
+
+  return 2;
+}
+#endif /* SWIFT_TRACERS_EAGLE_IO_H */
diff --git a/src/tracers/EAGLE/tracers_struct.h b/src/tracers/EAGLE/tracers_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..d893c85bcb65b625743f3cec603560d65efa472d
--- /dev/null
+++ b/src/tracers/EAGLE/tracers_struct.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_STRUCT_EAGLE_H
+#define SWIFT_TRACERS_STRUCT_EAGLE_H
+
+/**
+ * @brief Properties of the tracers stored in the extended particle data.
+ */
+struct tracers_xpart_data {
+
+  /*! Maximum temperature achieved by this particle */
+  float maximum_temperature;
+
+  /*! Anonymous union for the cosmological non-cosmological runs distinction */
+  union {
+
+    /*! Scale-factor at which the maximal temperature was reached */
+    float maximum_temperature_scale_factor;
+
+    /*! Time at which the maximal temperature was reached */
+    float maximum_temperature_time;
+  };
+};
+
+#endif /* SWIFT_TRACERS_STRUCT_EAGLE_H */
diff --git a/src/tracers/none/tracers.h b/src/tracers/none/tracers.h
new file mode 100644
index 0000000000000000000000000000000000000000..4cf2fb4ad4380139a392d5d76a0f78162aa6eac9
--- /dev/null
+++ b/src/tracers/none/tracers.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_NONE_H
+#define SWIFT_TRACERS_NONE_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "cooling.h"
+#include "part.h"
+#include "tracers_struct.h"
+
+/**
+ * @brief Update the particle tracers just after it has been initialised at the
+ * start of a step.
+ *
+ * Nothing to do here in the 'none' model.
+ *
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ */
+static INLINE void tracers_after_init(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {}
+
+/**
+ * @brief Update the particle tracers just after it has been drifted.
+ *
+ * Nothing to do here in the 'none' model.
+ *
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ */
+static INLINE void tracers_after_drift(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {}
+
+/**
+ * @brief Update the particle tracers just after its time-step has been
+ * computed.
+ *
+ * Nothing to do here in the 'none' model.
+ *
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ */
+static INLINE void tracers_after_timestep(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const int with_cosmology,
+    const struct cosmology *cosmo, const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling, const double time) {}
+
+/**
+ * @brief First initialisation of the particle tracers at the start of a
+ * simulation.
+ *
+ * Nothing to do here in the 'none' model.
+ *
+ * @param us The internal system of units.
+ * @param phys_const The physical constants in internal units.
+ * @param cosmo The current cosmological model.
+ * @param hydro_props the hydro_props struct
+ * @param cooling The #cooling_function_data used in the run.
+ * @param p Pointer to the particle data.
+ * @param xp Pointer to the extended particle data (containing the tracers
+ * struct).
+ */
+static INLINE void tracers_first_init_xpart(
+    const struct part *p, struct xpart *xp, const struct unit_system *us,
+    const struct phys_const *phys_const, const struct cosmology *cosmo,
+    const struct hydro_props *hydro_props,
+    const struct cooling_function_data *cooling) {}
+
+#endif /* SWIFT_TRACERS_NONE_H */
diff --git a/src/tracers/none/tracers_io.h b/src/tracers/none/tracers_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..b58e5f74aea211214b45c07668c03b461c088a99
--- /dev/null
+++ b/src/tracers/none/tracers_io.h
@@ -0,0 +1,66 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_NONE_IO_H
+#define SWIFT_TRACERS_NONE_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes */
+#include "io_properties.h"
+#include "tracers.h"
+
+#ifdef HAVE_HDF5
+
+/**
+ * @brief Writes the current model of tracers to the file.
+ *
+ * @param h_grp The HDF5 group in which to write
+ *
+ */
+__attribute__((always_inline)) INLINE static void tracers_write_flavour(
+    hid_t h_grp) {
+
+  io_write_attribute_s(h_grp, "Tracers", "none");
+}
+#endif
+
+/**
+ * @brief Specifies which particle fields to write to a dataset
+ *
+ * @param parts The particle array.
+ * @param xparts The extended data particle array.
+ * @param list The list of i/o properties to write.
+ *
+ * @return Returns the number of fields to write.
+ */
+__attribute__((always_inline)) INLINE static int tracers_write_particles(
+    const struct part* parts, const struct xpart* xparts, struct io_props* list,
+    const int with_cosmology) {
+
+  return 0;
+}
+
+__attribute__((always_inline)) INLINE static int tracers_write_sparticles(
+    const struct spart* sparts, struct io_props* list,
+    const int with_cosmology) {
+
+  return 0;
+}
+#endif /* SWIFT_TRACERS_NONE_IO_H */
diff --git a/src/sourceterms_struct.h b/src/tracers/none/tracers_struct.h
similarity index 74%
rename from src/sourceterms_struct.h
rename to src/tracers/none/tracers_struct.h
index b3c38986db52d72df825fda97b36c985dff922b6..b13539917eef888ade3f9056e245ebb7f963f4d1 100644
--- a/src/sourceterms_struct.h
+++ b/src/tracers/none/tracers_struct.h
@@ -1,6 +1,6 @@
 /*******************************************************************************
  * This file is part of SWIFT.
- * Coypright (c) 2015 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published
@@ -16,11 +16,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
-#ifndef SWIFT_SOURCETERMS_STRUCT_H
-#define SWIFT_SOURCETERMS_STRUCT_H
-#include "./const.h"
-#ifdef SOURCETERMS_SN_FEEDBACK
-#include "sourceterms/sn_feedback/sn_feedback_struct.h"
-#endif
+#ifndef SWIFT_TRACERS_STRUCT_NONE_H
+#define SWIFT_TRACERS_STRUCT_NONE_H
 
-#endif /*  SWIFT_SOURCETERMS_STRUCT_H */
+/**
+ * @brief Properties of the tracers stored in the extended particle data.
+ */
+struct tracers_xpart_data {};
+
+#endif /* SWIFT_TRACERS_STRUCT_NONE_H */
diff --git a/src/tracers_io.h b/src/tracers_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..87ca8bb1ffa22017aa825c2760fe42eca4c2888b
--- /dev/null
+++ b/src/tracers_io.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_IO_H
+#define SWIFT_TRACERS_IO_H
+
+/**
+ * @file src/tracers_io.h
+ * @brief Branches between the different particle data tracers
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right tracers definition */
+#if defined(TRACERS_NONE)
+#include "./tracers/none/tracers_io.h"
+#elif defined(TRACERS_EAGLE)
+#include "./tracers/EAGLE/tracers_io.h"
+#else
+#error "Invalid choice of tracers."
+#endif
+
+#endif /* SWIFT_TRACERS_IO_H */
diff --git a/src/tracers_struct.h b/src/tracers_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..48af73c2e577f7df3b0fed291b1517692f6437bd
--- /dev/null
+++ b/src/tracers_struct.h
@@ -0,0 +1,39 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_TRACERS_STRUCT_H
+#define SWIFT_TRACERS_STRUCT_H
+
+/**
+ * @file src/tracers_struct.h
+ * @brief Branches between the different particle data tracers
+ */
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Import the right tracers definition */
+#if defined(TRACERS_NONE)
+#include "./tracers/none/tracers_struct.h"
+#elif defined(TRACERS_EAGLE)
+#include "./tracers/EAGLE/tracers_struct.h"
+#else
+#error "Invalid choice of tracers."
+#endif
+
+#endif /* SWIFT_TRACERS_STRUCT_H */
diff --git a/src/units.c b/src/units.c
index 04e74bc4d7040ed1bde73184b125eec5d8a7fe97..1194735aafd80d51897e4c4cb3e4b0976478145a 100644
--- a/src/units.c
+++ b/src/units.c
@@ -244,6 +244,7 @@ void units_get_base_unit_exponants_array(float baseUnitsExp[5],
       break;
 
     case UNIT_CONV_FREQUENCY:
+    case UNIT_CONV_SSFR:
       baseUnitsExp[UNIT_TIME] = -1.f;
       break;
 
@@ -253,6 +254,7 @@ void units_get_base_unit_exponants_array(float baseUnitsExp[5],
       break;
 
     case UNIT_CONV_SPEED:
+    case UNIT_CONV_VELOCITY:
       baseUnitsExp[UNIT_LENGTH] = 1.f;
       baseUnitsExp[UNIT_TIME] = -1.f;
       break;
@@ -370,9 +372,15 @@ void units_get_base_unit_exponants_array(float baseUnitsExp[5],
       break;
 
     case UNIT_CONV_INV_VOLUME:
+    case UNIT_CONV_NUMBER_DENSITY:
       baseUnitsExp[UNIT_LENGTH] = -3.f;
       break;
 
+    case UNIT_CONV_SFR:
+      baseUnitsExp[UNIT_MASS] = 1.f;
+      baseUnitsExp[UNIT_TIME] = -1.f;
+      break;
+
     default:
       error("Invalid choice of pre-defined units");
       break;
@@ -490,6 +498,10 @@ float units_general_a_factor(const struct unit_system* us,
 /**
  * @brief Returns a string containing the exponents of the base units making up
  * the conversion factors (expressed in terms of the 5 fundamental units)
+ *
+ * Note that in accordance with the SWIFT philosophy, there are no h-factors
+ * in any units and hence in the string returned here.
+ *
  * @param buffer The buffer in which to write (The buffer must be long enough,
  * 140 chars at most)
  * @param us The UnitsSystem in use.
@@ -501,7 +513,7 @@ void units_general_cgs_conversion_string(char* buffer,
                                          const float baseUnitsExponants[5]) {
   char temp[20];
   const double a_exp = units_general_a_factor(us, baseUnitsExponants);
-  const double h_exp = units_general_h_factor(us, baseUnitsExponants);
+  const double h_exp = 0.; /* There are no h-factors in SWIFT outputs. */
 
   /* Check whether we are unitless or not */
   char isAllNonZero = 1;
diff --git a/src/units.h b/src/units.h
index 08b738c5303db8b40dfbe51799d67da8df3936ce..62669425e52c4e39800330a4150259856d8fc0bb 100644
--- a/src/units.h
+++ b/src/units.h
@@ -71,7 +71,9 @@ enum unit_conversion_factor {
   UNIT_CONV_LENGTH,
   UNIT_CONV_TIME,
   UNIT_CONV_DENSITY,
+  UNIT_CONV_NUMBER_DENSITY,
   UNIT_CONV_SPEED,
+  UNIT_CONV_VELOCITY,
   UNIT_CONV_ACCELERATION,
   UNIT_CONV_POTENTIAL,
   UNIT_CONV_FORCE,
@@ -92,7 +94,9 @@ enum unit_conversion_factor {
   UNIT_CONV_MAGNETIC_INDUCTANCE,
   UNIT_CONV_TEMPERATURE,
   UNIT_CONV_VOLUME,
-  UNIT_CONV_INV_VOLUME
+  UNIT_CONV_INV_VOLUME,
+  UNIT_CONV_SFR,
+  UNIT_CONV_SSFR
 };
 
 void units_init_cgs(struct unit_system*);
diff --git a/src/velociraptor_dummy.c b/src/velociraptor_dummy.c
new file mode 100644
index 0000000000000000000000000000000000000000..36cb65bfbe6931464f33d7e4b641f8882fdf65d0
--- /dev/null
+++ b/src/velociraptor_dummy.c
@@ -0,0 +1,73 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2018 James Willis (james.s.willis@durham.ac.uk)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local includes. */
+#include "error.h"
+#include "swift_velociraptor_part.h"
+#include "velociraptor_interface.h"
+
+/* Dummy VELOCIraptor interface for testing compilation without linking the
+ * actual VELOCIraptor library. */
+#ifdef HAVE_DUMMY_VELOCIRAPTOR
+struct cosmoinfo {};
+struct unitinfo {};
+struct cell_loc {};
+struct siminfo {};
+
+/*
+int InitVelociraptor(char *config_name, char *output_name,
+                     struct cosmoinfo cosmo_info, struct unitinfo unit_info,
+                     struct siminfo sim_info, const int numthreads) {
+
+  error("This is only a dummy. Call the real one!");
+  return 0;
+}
+
+int InvokeVelociraptor(const size_t num_gravity_parts,
+                       const size_t num_hydro_parts, const int snapnum,
+                       struct swift_vel_part *swift_parts,
+                       const int *cell_node_ids, char *output_name,
+                       const int numthreads) {
+
+  error("This is only a dummy. Call the real one!");
+  return 0;
+}
+*/
+int InitVelociraptor(char *config_name, struct unitinfo unit_info,
+                     struct siminfo sim_info, const int numthreads) {
+
+  error("This is only a dummy. Call the real one!");
+  return 0;
+}
+
+struct groupinfo *InvokeVelociraptor(
+    const int snapnum, char *output_name, struct cosmoinfo cosmo_info,
+    struct siminfo sim_info, const size_t num_gravity_parts,
+    const size_t num_hydro_parts, const size_t num_star_parts,
+    struct swift_vel_part *swift_parts, const int *cell_node_ids,
+    const int numthreads, const int return_group_flags,
+    int *const num_in_groups) {
+  error("This is only a dummy. Call the real one!");
+  return 0;
+}
+
+#endif /* HAVE_DUMMY_VELOCIRAPTOR */
diff --git a/src/velociraptor_interface.c b/src/velociraptor_interface.c
index d7331ce49f102f52adafff1364dce173fc247586..47c107d047e3abfcac32571f780cf52649a9c38d 100644
--- a/src/velociraptor_interface.c
+++ b/src/velociraptor_interface.c
@@ -21,153 +21,353 @@
 #include "../config.h"
 
 /* Some standard headers. */
-#include <errno.h>
 #include <unistd.h>
 
 /* This object's header. */
 #include "velociraptor_interface.h"
 
 /* Local includes. */
-#include "common_io.h"
+#include "cooling.h"
 #include "engine.h"
 #include "hydro.h"
 #include "swift_velociraptor_part.h"
+#include "velociraptor_struct.h"
 
 #ifdef HAVE_VELOCIRAPTOR
 
-/* VELOCIraptor interface. */
-int InitVelociraptor(char *config_name, char *output_name,
-                     struct cosmoinfo cosmo_info, struct unitinfo unit_info,
-                     struct siminfo sim_info);
-int InvokeVelociraptor(const size_t num_gravity_parts,
-                       const size_t num_hydro_parts,
-                       struct swift_vel_part *swift_parts,
-                       const int *cell_node_ids, char *output_name);
+/**
+ * @brief Structure for passing cosmological information to VELOCIraptor.
+ */
+struct cosmoinfo {
+
+  /*! Current expansion factor of the Universe. (cosmology.a) */
+  double atime;
+
+  /*! Reduced Hubble constant (H0 / (100km/s/Mpc) (cosmology.h) */
+  double littleh;
+
+  /*! Matter density parameter (cosmology.Omega_m) */
+  double Omega_m;
+
+  /*! Radiation density parameter (cosmology.Omega_r) */
+  double Omega_r;
+
+  /*! Neutrino density parameter (0 in SWIFT) */
+  double Omega_nu;
+
+  /*! Curvature density parameter (cosmology.Omega_k) */
+  double Omega_k;
+
+  /*! Baryon density parameter (cosmology.Omega_b) */
+  double Omega_b;
+
+  /*! Cosmological constant density parameter (cosmology.Omega_lambda) */
+  double Omega_Lambda;
+
+  /*! Dark matter density parameter (cosmology.Omega_m - cosmology.Omega_b) */
+  double Omega_cdm;
+
+  /*! Dark-energy equation of state at the current time (cosmology.w)*/
+  double w_de;
+};
+
+/**
+ * @brief Structure for passing unit information to VELOCIraptor.
+ */
+struct unitinfo {
+
+  /*! Length conversion factor to kpc. */
+  double lengthtokpc;
+
+  /*! Velocity conversion factor to km/s. */
+  double velocitytokms;
+
+  /*! Mass conversion factor to solar masses. */
+  double masstosolarmass;
+
+  /*! Potential conversion factor to (km/s)^2. */
+  double energyperunitmass;
+
+  /*! Newton's gravitational constant (phys_const.const_newton_G) */
+  double gravity;
+
+  /*! Hubble constant at the current redshift (cosmology.H) */
+  double hubbleunit;
+};
+
+/**
+ * @brief Structure to hold the location of a top-level cell.
+ */
+struct cell_loc {
+
+  /*! Coordinates x,y,z */
+  double loc[3];
+};
+
+/**
+ * @brief Structure for passing simulation information to VELOCIraptor for a
+ * given call.
+ */
+struct siminfo {
+
+  /*! Size of periodic replications */
+  double period;
+
+  /*! Mass of the high-resolution DM particles in a zoom-in run. */
+  double zoomhigresolutionmass;
+
+  /*! Mean inter-particle separation of the DM particles */
+  double interparticlespacing;
+
+  /*! Spatial extent of the simulation volume */
+  double spacedimension[3];
+
+  /*! Number of top-level cells. */
+  int numcells;
+
+  /*! Locations of top-level cells. */
+  struct cell_loc *cell_loc;
+
+  /*! Top-level cell width. */
+  double cellwidth[3];
+
+  /*! Inverse of the top-level cell width. */
+  double icellwidth[3];
+
+  /*! Holds the node ID of each top-level cell. */
+  int *cellnodeids;
+
+  /*! Is this a cosmological simulation? */
+  int icosmologicalsim;
+
+  /*! Is this a zoom-in simulation? */
+  int izoomsim;
+
+  /*! Do we have DM particles? */
+  int idarkmatter;
+
+  /*! Do we have gas particles? */
+  int igas;
+
+  /*! Do we have star particles? */
+  int istar;
+
+  /*! Do we have BH particles? */
+  int ibh;
+
+  /*! Do we have other particles? */
+  int iother;
+};
+
+/**
+ * @brief Structure for passing group information back to SWIFT.
+ */
+struct groupinfo {
+
+  /*! Index of a #gpart in the global array on this MPI rank */
+  int index;
+
+  /*! Group number of the #gpart. */
+  long long groupID;
+};
+
+int InitVelociraptor(char *config_name, struct unitinfo unit_info,
+                     struct siminfo sim_info, const int numthreads);
+
+struct groupinfo *InvokeVelociraptor(
+    const int snapnum, char *output_name, struct cosmoinfo cosmo_info,
+    struct siminfo sim_info, const size_t num_gravity_parts,
+    const size_t num_hydro_parts, const size_t num_star_parts,
+    struct swift_vel_part *swift_parts, const int *cell_node_ids,
+    const int numthreads, const int return_group_flags,
+    int *const num_in_groups);
 
 #endif /* HAVE_VELOCIRAPTOR */
 
 /**
- * @brief Initialise VELOCIraptor with input and output file names along with
- * cosmological info needed to run.
+ * @brief Temporary structure used for the data copy mapper.
+ */
+struct velociraptor_copy_data {
+  const struct engine *e;
+  struct swift_vel_part *swift_parts;
+};
+
+/**
+ * @brief Mapper function to convert the #gpart into VELOCIraptor Particles.
  *
- * @param e The #engine.
+ * @param map_data The array of #gpart.
+ * @param nr_gparts The number of #gpart.
+ * @param extra_data Pointer to the #engine and to the array to fill.
+ */
+void velociraptor_convert_particles_mapper(void *map_data, int nr_gparts,
+                                           void *extra_data) {
+
+  /* Unpack the data */
+  struct gpart *restrict gparts = (struct gpart *)map_data;
+  struct velociraptor_copy_data *data =
+      (struct velociraptor_copy_data *)extra_data;
+  const struct engine *e = data->e;
+  const struct space *s = e->s;
+  struct swift_vel_part *swift_parts =
+      data->swift_parts + (ptrdiff_t)(gparts - s->gparts);
+
+  /* Handle on the other particle types */
+  const struct part *parts = s->parts;
+  const struct xpart *xparts = s->xparts;
+  const struct spart *sparts = s->sparts;
+
+  /* Handle on the physics modules */
+  const struct cosmology *cosmo = e->cosmology;
+  const struct hydro_props *hydro_props = e->hydro_properties;
+  const struct unit_system *us = e->internal_units;
+  const struct phys_const *phys_const = e->physical_constants;
+  const struct cooling_function_data *cool_func = e->cooling_func;
+
+  const float a_inv = e->cosmology->a_inv;
+
+  /* Convert particle properties into VELOCIraptor units.
+   * VELOCIraptor wants:
+   * - Co-moving positions,
+   * - Peculiar velocities,
+   * - Co-moving potential,
+   * - Physical internal energy (for the gas),
+   * - Temperatures (for the gas).
+   */
+  for (int i = 0; i < nr_gparts; i++) {
+
+    swift_parts[i].x[0] = gparts[i].x[0];
+    swift_parts[i].x[1] = gparts[i].x[1];
+    swift_parts[i].x[2] = gparts[i].x[2];
+
+    swift_parts[i].v[0] = gparts[i].v_full[0] * a_inv;
+    swift_parts[i].v[1] = gparts[i].v_full[1] * a_inv;
+    swift_parts[i].v[2] = gparts[i].v_full[2] * a_inv;
+
+    swift_parts[i].mass = gravity_get_mass(&gparts[i]);
+    swift_parts[i].potential = gravity_get_comoving_potential(&gparts[i]);
+
+    swift_parts[i].type = gparts[i].type;
+
+    swift_parts[i].index = i;
+#ifdef WITH_MPI
+    swift_parts[i].task = e->nodeID;
+#else
+    swift_parts[i].task = 0;
+#endif
+
+    /* Set gas particle IDs from their hydro counterparts and set internal
+     * energies. */
+    switch (gparts[i].type) {
+
+      case swift_type_gas: {
+        const struct part *p = &parts[-gparts[i].id_or_neg_offset];
+        const struct xpart *xp = &xparts[-gparts[i].id_or_neg_offset];
+
+        swift_parts[i].id = parts[-gparts[i].id_or_neg_offset].id;
+        swift_parts[i].u = hydro_get_drifted_physical_internal_energy(p, cosmo);
+        swift_parts[i].T = cooling_get_temperature(phys_const, hydro_props, us,
+                                                   cosmo, cool_func, p, xp);
+      } break;
+
+      case swift_type_stars:
+
+        swift_parts[i].id = sparts[-gparts[i].id_or_neg_offset].id;
+        swift_parts[i].u = 0.f;
+        swift_parts[i].T = 0.f;
+        break;
+
+      case swift_type_dark_matter:
+
+        swift_parts[i].id = gparts[i].id_or_neg_offset;
+        swift_parts[i].u = 0.f;
+        swift_parts[i].T = 0.f;
+        break;
+
+      default:
+        error("Particle type not handled by VELOCIraptor.");
+    }
+  }
+}
+
+/**
+ * @brief Initialise VELOCIraptor with configuration, units,
+ * simulation info needed to run.
  *
+ * @param e The #engine.
  */
 void velociraptor_init(struct engine *e) {
 
 #ifdef HAVE_VELOCIRAPTOR
-  struct space *s = e->s;
-  struct cosmoinfo cosmo_info;
-  struct unitinfo unit_info;
-  struct siminfo sim_info;
+  const ticks tic = getticks();
 
-  /* Set cosmological constants. */
-  cosmo_info.atime = e->cosmology->a;
-  cosmo_info.littleh = e->cosmology->h;
-  cosmo_info.Omega_m = e->cosmology->Omega_m;
-  cosmo_info.Omega_b = e->cosmology->Omega_b;
-  cosmo_info.Omega_Lambda = e->cosmology->Omega_lambda;
-  cosmo_info.Omega_cdm = e->cosmology->Omega_m - e->cosmology->Omega_b;
-  cosmo_info.w_de = e->cosmology->w;
-
-  message("Scale factor: %e", cosmo_info.atime);
-  message("Little h: %e", cosmo_info.littleh);
-  message("Omega_m: %e", cosmo_info.Omega_m);
-  message("Omega_b: %e", cosmo_info.Omega_b);
-  message("Omega_Lambda: %e", cosmo_info.Omega_Lambda);
-  message("Omega_cdm: %e", cosmo_info.Omega_cdm);
-  message("w_de: %e", cosmo_info.w_de);
+  /* Internal SWIFT units */
+  const struct unit_system *swift_us = e->internal_units;
 
-  if (e->cosmology->w != -1.)
-    error("w_de is not 1. It is: %lf", e->cosmology->w);
+  /* CGS units and physical constants in CGS */
+  struct unit_system cgs_us;
+  units_init_cgs(&cgs_us);
+  struct phys_const cgs_pc;
+  phys_const_init(&cgs_us, /*params=*/NULL, &cgs_pc);
 
   /* Set unit conversions. */
-  unit_info.lengthtokpc = 1.0;
-  unit_info.velocitytokms = 1.0;
-  unit_info.masstosolarmass = 1.0;
-  unit_info.energyperunitmass = 1.0;
+  struct unitinfo unit_info;
+  unit_info.lengthtokpc =
+      units_cgs_conversion_factor(swift_us, UNIT_CONV_LENGTH) /
+      (1000. * cgs_pc.const_parsec);
+  unit_info.velocitytokms =
+      units_cgs_conversion_factor(swift_us, UNIT_CONV_VELOCITY) / 1.0e5;
+  unit_info.masstosolarmass =
+      units_cgs_conversion_factor(swift_us, UNIT_CONV_MASS) /
+      cgs_pc.const_solar_mass;
+  unit_info.energyperunitmass =
+      units_cgs_conversion_factor(swift_us, UNIT_CONV_ENERGY_PER_UNIT_MASS) /
+      (1.0e10);
   unit_info.gravity = e->physical_constants->const_newton_G;
   unit_info.hubbleunit = e->cosmology->H0 / e->cosmology->h;
 
-  message("Length conversion factor: %e", unit_info.lengthtokpc);
-  message("Velocity conversion factor: %e", unit_info.velocitytokms);
-  message("Mass conversion factor: %e", unit_info.masstosolarmass);
-  message("Potential conversion factor: %e", unit_info.energyperunitmass);
-  message("G: %e", unit_info.gravity);
-  message("H: %e", unit_info.hubbleunit);
-
-  /* TODO: Find the total number of DM particles when running with star
-   * particles and BHs. */
-  const int total_nr_dmparts = e->total_nr_gparts - e->total_nr_parts;
+  /* Gather some information about the simulation */
+  struct siminfo sim_info;
 
-  /* Set simulation information. */
-  if (e->s->periodic) {
-    sim_info.period =
-        unit_info.lengthtokpc *
-        s->dim[0]; /* Physical size of box in VELOCIraptor units (kpc). */
-  } else
-    sim_info.period = 0.0;
-  sim_info.zoomhigresolutionmass = -1.0; /* Placeholder. */
-  sim_info.interparticlespacing = sim_info.period / cbrt(total_nr_dmparts);
-  if (e->policy & engine_policy_cosmology)
+  /* Are we running with cosmology? */
+  if (e->policy & engine_policy_cosmology) {
     sim_info.icosmologicalsim = 1;
-  else
+  } else {
     sim_info.icosmologicalsim = 0;
-  sim_info.spacedimension[0] = unit_info.lengthtokpc * s->dim[0];
-  sim_info.spacedimension[1] = unit_info.lengthtokpc * s->dim[1];
-  sim_info.spacedimension[2] = unit_info.lengthtokpc * s->dim[2];
-  sim_info.numcells = s->nr_cells;
-
-  sim_info.cellwidth[0] = unit_info.lengthtokpc * s->cells_top[0].width[0];
-  sim_info.cellwidth[1] = unit_info.lengthtokpc * s->cells_top[0].width[1];
-  sim_info.cellwidth[2] = unit_info.lengthtokpc * s->cells_top[0].width[2];
-
-  sim_info.icellwidth[0] = s->iwidth[0] / unit_info.lengthtokpc;
-  sim_info.icellwidth[1] = s->iwidth[1] / unit_info.lengthtokpc;
-  sim_info.icellwidth[2] = s->iwidth[2] / unit_info.lengthtokpc;
-
-  /* Only allocate cell location array on first call to velociraptor_init(). */
-  if (e->cell_loc == NULL) {
-    /* Allocate and populate top-level cell locations. */
-    if (posix_memalign((void **)&(e->cell_loc), 32,
-                       s->nr_cells * sizeof(struct cell_loc)) != 0)
-      error("Failed to allocate top-level cell locations for VELOCIraptor.");
-
-    for (int i = 0; i < s->nr_cells; i++) {
-      e->cell_loc[i].loc[0] = unit_info.lengthtokpc * s->cells_top[i].loc[0];
-      e->cell_loc[i].loc[1] = unit_info.lengthtokpc * s->cells_top[i].loc[1];
-      e->cell_loc[i].loc[2] = unit_info.lengthtokpc * s->cells_top[i].loc[2];
-    }
   }
-
-  sim_info.cell_loc = e->cell_loc;
-
-  char configfilename[PARSER_MAX_LINE_SIZE],
-      outputFileName[PARSER_MAX_LINE_SIZE + 128];
-  parser_get_param_string(e->parameter_file,
-                          "StructureFinding:config_file_name", configfilename);
-  snprintf(outputFileName, PARSER_MAX_LINE_SIZE + 128, "%s.VELOCIraptor",
-           e->stfBaseName);
-
-  message("Config file name: %s", configfilename);
-  message("Period: %e", sim_info.period);
-  message("Zoom high res mass: %e", sim_info.zoomhigresolutionmass);
-  message("Inter-particle spacing: %e", sim_info.interparticlespacing);
-  message("Cosmological: %d", sim_info.icosmologicalsim);
-  message("Space dimensions: (%e,%e,%e)", sim_info.spacedimension[0],
-          sim_info.spacedimension[1], sim_info.spacedimension[2]);
-  message("No. of top-level cells: %d", sim_info.numcells);
-  message("Top-level cell locations range: (%e,%e,%e) -> (%e,%e,%e)",
-          sim_info.cell_loc[0].loc[0], sim_info.cell_loc[0].loc[1],
-          sim_info.cell_loc[0].loc[2],
-          sim_info.cell_loc[sim_info.numcells - 1].loc[0],
-          sim_info.cell_loc[sim_info.numcells - 1].loc[1],
-          sim_info.cell_loc[sim_info.numcells - 1].loc[2]);
+  sim_info.izoomsim = 0;
+
+  /* Tell VELOCIraptor what we have in the simulation */
+  sim_info.idarkmatter = (e->total_nr_gparts - e->total_nr_parts > 0);
+  sim_info.igas = (e->policy & engine_policy_hydro);
+  sim_info.istar = (e->policy & engine_policy_stars);
+  sim_info.ibh = 0;  // sim_info.ibh = (e->policy&engine_policy_bh);
+  sim_info.iother = 0;
+
+  /* Be nice, talk! */
+  if (e->verbose) {
+    message("VELOCIraptor conf: Length conversion factor: %e",
+            unit_info.lengthtokpc);
+    message("VELOCIraptor conf: Velocity conversion factor: %e",
+            unit_info.velocitytokms);
+    message("VELOCIraptor conf: Mass conversion factor: %e",
+            unit_info.masstosolarmass);
+    message("VELOCIraptor conf: Internal energy conversion factor: %e",
+            unit_info.energyperunitmass);
+    message("VELOCIraptor conf: G: %e", unit_info.gravity);
+    message("VELOCIraptor conf: H0/h: %e", unit_info.hubbleunit);
+    message("VELOCIraptor conf: Config file name: %s", e->stf_config_file_name);
+    message("VELOCIraptor conf: Cosmological Simulation: %d",
+            sim_info.icosmologicalsim);
+  }
 
   /* Initialise VELOCIraptor. */
-  if (!InitVelociraptor(configfilename, outputFileName, cosmo_info, unit_info,
-                        sim_info))
-    error("Exiting. VELOCIraptor initialisation failed.");
+  if (InitVelociraptor(e->stf_config_file_name, unit_info, sim_info,
+                       e->nr_threads) != 1)
+    error("VELOCIraptor initialisation failed.");
+
+  if (e->verbose)
+    message("took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
 #else
   error("SWIFT not configure to run with VELOCIraptor.");
 #endif /* HAVE_VELOCIRAPTOR */
@@ -177,113 +377,241 @@ void velociraptor_init(struct engine *e) {
  * @brief Run VELOCIraptor with current particle data.
  *
  * @param e The #engine.
- *
+ * @param linked_with_snap Are we running at the same time as a snapshot dump?
  */
-void velociraptor_invoke(struct engine *e) {
+void velociraptor_invoke(struct engine *e, const int linked_with_snap) {
 
 #ifdef HAVE_VELOCIRAPTOR
-  struct space *s = e->s;
-  struct gpart *gparts = s->gparts;
-  struct part *parts = s->parts;
+
+  /* Handle on the particles */
+  const struct space *s = e->s;
   const size_t nr_gparts = s->nr_gparts;
-  const size_t nr_hydro_parts = s->nr_parts;
+  const size_t nr_parts = s->nr_parts;
+  const size_t nr_sparts = s->nr_sparts;
   const int nr_cells = s->nr_cells;
-  int *cell_node_ids = NULL;
+  const struct cell *cells_top = s->cells_top;
 
   /* Allow thread to run on any core for the duration of the call to
-   * VELOCIraptor so that
-   * when OpenMP threads are spawned they can run on any core on the processor.
-   */
+   * VELOCIraptor so that  when OpenMP threads are spawned
+   * they can run on any core on the processor. */
   const int nr_cores = sysconf(_SC_NPROCESSORS_ONLN);
-  cpu_set_t cpuset;
   pthread_t thread = pthread_self();
 
   /* Set affinity mask to include all cores on the CPU for VELOCIraptor. */
+  cpu_set_t cpuset;
   CPU_ZERO(&cpuset);
   for (int j = 0; j < nr_cores; j++) CPU_SET(j, &cpuset);
-
   pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
 
+  /* Set cosmology information for this point in time */
+  struct cosmoinfo cosmo_info;
+  cosmo_info.atime = e->cosmology->a;
+  cosmo_info.littleh = e->cosmology->h;
+  cosmo_info.Omega_m = e->cosmology->Omega_m;
+  cosmo_info.Omega_b = e->cosmology->Omega_b;
+  cosmo_info.Omega_r = e->cosmology->Omega_r;
+  cosmo_info.Omega_k = e->cosmology->Omega_k;
+  cosmo_info.Omega_nu = 0.;
+  cosmo_info.Omega_Lambda = e->cosmology->Omega_lambda;
+  cosmo_info.Omega_cdm = e->cosmology->Omega_m - e->cosmology->Omega_b;
+  cosmo_info.w_de = e->cosmology->w;
+
+  /* Report the cosmo info we use */
+  if (e->verbose) {
+    message("VELOCIraptor conf: Scale factor: %e", cosmo_info.atime);
+    message("VELOCIraptor conf: Little h: %e", cosmo_info.littleh);
+    message("VELOCIraptor conf: Omega_m: %e", cosmo_info.Omega_m);
+    message("VELOCIraptor conf: Omega_b: %e", cosmo_info.Omega_b);
+    message("VELOCIraptor conf: Omega_Lambda: %e", cosmo_info.Omega_Lambda);
+    message("VELOCIraptor conf: Omega_cdm: %e", cosmo_info.Omega_cdm);
+    message("VELOCIraptor conf: w_de: %e", cosmo_info.w_de);
+  }
+
+  /* Update the simulation information */
+  struct siminfo sim_info;
+
+  /* Period of the box (Note we assume a cubic box!) */
+  if (e->s->periodic) {
+    sim_info.period = s->dim[0];
+  } else {
+    sim_info.period = 0.0;
+  }
+
+  /* Tell VELOCIraptor this is not a zoom-in simulation */
+  sim_info.zoomhigresolutionmass = -1.0;
+
+  /* Are we running with cosmology? */
+  if (e->policy & engine_policy_cosmology) {
+    sim_info.icosmologicalsim = 1;
+    sim_info.izoomsim = 0;
+    const size_t total_nr_baryons = e->total_nr_parts + e->total_nr_sparts;
+    const size_t total_nr_dmparts = e->total_nr_gparts - total_nr_baryons;
+    sim_info.interparticlespacing = sim_info.period / cbrt(total_nr_dmparts);
+  } else {
+    sim_info.icosmologicalsim = 0;
+    sim_info.izoomsim = 0;
+    sim_info.interparticlespacing = -1;
+  }
+
+  /* Set the spatial extent of the simulation volume */
+  sim_info.spacedimension[0] = s->dim[0];
+  sim_info.spacedimension[1] = s->dim[1];
+  sim_info.spacedimension[2] = s->dim[2];
+
+  /* Store number of top-level cells */
+  sim_info.numcells = s->nr_cells;
+
+  /* Size and inverse size of the top-level cells in VELOCIraptor units */
+  sim_info.cellwidth[0] = s->cells_top[0].width[0];
+  sim_info.cellwidth[1] = s->cells_top[0].width[1];
+  sim_info.cellwidth[2] = s->cells_top[0].width[2];
+  sim_info.icellwidth[0] = s->iwidth[0];
+  sim_info.icellwidth[1] = s->iwidth[1];
+  sim_info.icellwidth[2] = s->iwidth[2];
+
   ticks tic = getticks();
 
-  /* Allocate and populate array of cell node IDs. */
-  if (posix_memalign((void **)&cell_node_ids, 32, nr_cells * sizeof(int)) != 0)
+  /* Allocate and populate array of cell node IDs and positions. */
+  int *cell_node_ids = NULL;
+  if (posix_memalign((void **)&sim_info.cell_loc, SWIFT_STRUCT_ALIGNMENT,
+                     s->nr_cells * sizeof(struct cell_loc)) != 0)
+    error("Failed to allocate top-level cell locations for VELOCIraptor.");
+  if (posix_memalign((void **)&cell_node_ids, SWIFT_STRUCT_ALIGNMENT,
+                     nr_cells * sizeof(int)) != 0)
     error("Failed to allocate list of cells node IDs for VELOCIraptor.");
 
-  for (int i = 0; i < nr_cells; i++) cell_node_ids[i] = s->cells_top[i].nodeID;
+  for (int i = 0; i < s->nr_cells; i++) {
+    cell_node_ids[i] = cells_top[i].nodeID;
+
+    sim_info.cell_loc[i].loc[0] = cells_top[i].loc[0];
+    sim_info.cell_loc[i].loc[1] = cells_top[i].loc[1];
+    sim_info.cell_loc[i].loc[2] = cells_top[i].loc[2];
+  }
+
+  if (e->verbose) {
+    message("VELOCIraptor conf: Space dimensions: (%e,%e,%e)",
+            sim_info.spacedimension[0], sim_info.spacedimension[1],
+            sim_info.spacedimension[2]);
+    message("VELOCIraptor conf: No. of top-level cells: %d", sim_info.numcells);
+    message(
+        "VELOCIraptor conf: Top-level cell locations range: (%e,%e,%e) -> "
+        "(%e,%e,%e)",
+        sim_info.cell_loc[0].loc[0], sim_info.cell_loc[0].loc[1],
+        sim_info.cell_loc[0].loc[2],
+        sim_info.cell_loc[sim_info.numcells - 1].loc[0],
+        sim_info.cell_loc[sim_info.numcells - 1].loc[1],
+        sim_info.cell_loc[sim_info.numcells - 1].loc[2]);
+  }
+
+  /* Report timing */
+  if (e->verbose)
+    message("VR Collecting top-level cell info took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
 
-  message("MPI rank %d sending %zu gparts to VELOCIraptor.", engine_rank,
-          nr_gparts);
+  /* Mention the number of particles being sent */
+  if (e->verbose)
+    message(
+        "VELOCIraptor conf: MPI rank %d sending %zu gparts to VELOCIraptor.",
+        engine_rank, nr_gparts);
 
-  /* Append base name with either the step number or time depending on what
-   * format is specified in the parameter file. */
+  /* Append base name with the current output number */
   char outputFileName[PARSER_MAX_LINE_SIZE + 128];
-  if (e->stf_output_freq_format == STEPS) {
+
+  /* What should the filename be? */
+  if (linked_with_snap) {
+    snprintf(outputFileName, PARSER_MAX_LINE_SIZE + 128,
+             "stf_%s_%04i.VELOCIraptor", e->snapshot_base_name,
+             e->snapshot_output_count);
+  } else {
     snprintf(outputFileName, PARSER_MAX_LINE_SIZE + 128, "%s_%04i.VELOCIraptor",
-             e->stfBaseName, e->step);
-  } else if (e->stf_output_freq_format == TIME) {
-    snprintf(outputFileName, PARSER_MAX_LINE_SIZE + 128, "%s_%04e.VELOCIraptor",
-             e->stfBaseName, e->time);
+             e->stf_base_name, e->stf_output_count);
+  }
+
+  /* What is the snapshot number? */
+  int snapnum;
+  if (linked_with_snap) {
+    snapnum = e->snapshot_output_count;
+  } else {
+    snapnum = e->stf_output_count;
   }
 
+  tic = getticks();
+
   /* Allocate and populate an array of swift_vel_parts to be passed to
    * VELOCIraptor. */
   struct swift_vel_part *swift_parts = NULL;
-
   if (posix_memalign((void **)&swift_parts, part_align,
                      nr_gparts * sizeof(struct swift_vel_part)) != 0)
     error("Failed to allocate array of particles for VELOCIraptor.");
 
-  bzero(swift_parts, nr_gparts * sizeof(struct swift_vel_part));
-
-  const float energy_scale = 1.0;
-  const float a = e->cosmology->a;
+  struct velociraptor_copy_data copy_data = {e, swift_parts};
+  threadpool_map(&e->threadpool, velociraptor_convert_particles_mapper,
+                 s->gparts, nr_gparts, sizeof(struct gpart), 0, &copy_data);
 
-  message("Energy scaling factor: %f", energy_scale);
-  message("a: %f", a);
+  /* Report timing */
+  if (e->verbose)
+    message("VR Collecting particle info took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
 
-  /* Convert particle properties into VELOCIraptor units */
-  for (size_t i = 0; i < nr_gparts; i++) {
-    swift_parts[i].x[0] = gparts[i].x[0];
-    swift_parts[i].x[1] = gparts[i].x[1];
-    swift_parts[i].x[2] = gparts[i].x[2];
-    swift_parts[i].v[0] = gparts[i].v_full[0] / a;
-    swift_parts[i].v[1] = gparts[i].v_full[1] / a;
-    swift_parts[i].v[2] = gparts[i].v_full[2] / a;
-    swift_parts[i].mass = gravity_get_mass(&gparts[i]);
-    swift_parts[i].potential = gravity_get_comoving_potential(&gparts[i]);
-    swift_parts[i].type = gparts[i].type;
+  tic = getticks();
 
-    /* Set gas particle IDs from their hydro counterparts and set internal
-     * energies. */
-    if (gparts[i].type == swift_type_gas) {
-      swift_parts[i].id = parts[-gparts[i].id_or_neg_offset].id;
-      swift_parts[i].u =
-          hydro_get_physical_internal_energy(
-              &parts[-gparts[i].id_or_neg_offset], e->cosmology) *
-          energy_scale;
-    } else if (gparts[i].type == swift_type_dark_matter) {
-      swift_parts[i].id = gparts[i].id_or_neg_offset;
-      swift_parts[i].u = 0.f;
-    } else {
-      error("Particle type not handled by velociraptor (yet?) !");
-    }
-  }
+  /* Values returned by VELOCIRaptor */
+  int num_gparts_in_groups = -1;
+  struct groupinfo *group_info = NULL;
 
   /* Call VELOCIraptor. */
-  if (!InvokeVelociraptor(nr_gparts, nr_hydro_parts, swift_parts, cell_node_ids,
-                          outputFileName))
+  group_info = (struct groupinfo *)InvokeVelociraptor(
+      snapnum, outputFileName, cosmo_info, sim_info, nr_gparts, nr_parts,
+      nr_sparts, swift_parts, cell_node_ids, e->nr_threads, linked_with_snap,
+      &num_gparts_in_groups);
+
+  /* Check that the ouput is valid */
+  if (linked_with_snap && group_info == NULL && num_gparts_in_groups < 0) {
     error("Exiting. Call to VELOCIraptor failed on rank: %d.", e->nodeID);
+  }
+  if (!linked_with_snap && group_info != NULL) {
+    error("VELOCIraptor returned an array whilst it should not have.");
+  }
+
+  /* Report timing */
+  if (e->verbose)
+    message("VR Invokation of velociraptor took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+
+  /* Assign the group IDs back to the gparts */
+  if (linked_with_snap) {
+
+    if (posix_memalign((void **)&s->gpart_group_data, part_align,
+                       nr_gparts * sizeof(struct velociraptor_gpart_data)) != 0)
+      error("Failed to allocate array of gpart data for VELOCIraptor i/o.");
+
+    struct velociraptor_gpart_data *data = s->gpart_group_data;
+
+    /* Zero the array (gparts not in groups have an ID of 0) */
+    bzero(data, nr_gparts * sizeof(struct velociraptor_gpart_data));
+
+    /* Copy the data at the right place */
+    for (int i = 0; i < num_gparts_in_groups; i++) {
+      data[group_info[i].index].groupID = group_info[i].groupID;
+    }
+
+    /* Report timing */
+    if (e->verbose)
+      message("VR Copying group information back took %.3f %s.",
+              clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+    /* Free the array returned by VELOCIraptor */
+    free(group_info);
+  }
 
   /* Reset the pthread affinity mask after VELOCIraptor returns. */
   pthread_setaffinity_np(thread, sizeof(cpu_set_t), engine_entry_affinity());
 
-  /* Free cell node ids after VELOCIraptor has copied them. */
-  free(cell_node_ids);
-  free(swift_parts);
+  /* Increase output counter (if not linked with snapshots) */
+  if (!linked_with_snap) e->stf_output_count++;
 
-  message("VELOCIraptor took %.3f %s on rank %d.",
-          clocks_from_ticks(getticks() - tic), clocks_getunit(), engine_rank);
 #else
   error("SWIFT not configure to run with VELOCIraptor.");
 #endif /* HAVE_VELOCIRAPTOR */
diff --git a/src/velociraptor_interface.h b/src/velociraptor_interface.h
index 0f6b8d339471f4bb1409baae62475a74e68cb5b1..2547fa56c1677e93b1c59a1435e9a6ab92c1f308 100644
--- a/src/velociraptor_interface.h
+++ b/src/velociraptor_interface.h
@@ -25,81 +25,8 @@
 /* Forward declaration */
 struct engine;
 
-/* Structure for passing cosmological information to VELOCIraptor. */
-struct cosmoinfo {
-
-  /*! Current expansion factor of the Universe. (cosmology.a) */
-  double atime;
-
-  /*! Reduced Hubble constant (H0 / (100km/s/Mpc) (cosmology.h) */
-  double littleh;
-
-  /*! Matter density parameter (cosmology.Omega_m) */
-  double Omega_m;
-
-  /*! Baryon density parameter (cosmology.Omega_b) */
-  double Omega_b;
-
-  /*! Radiation constant density parameter (cosmology.Omega_lambda) */
-  double Omega_Lambda;
-
-  /*! Dark matter density parameter (cosmology.Omega_m - cosmology.Omega_b) */
-  double Omega_cdm;
-
-  /*! Dark-energy equation of state at the current time (cosmology.w)*/
-  double w_de;
-};
-
-/* Structure for passing unit information to VELOCIraptor. */
-struct unitinfo {
-
-  /* Length conversion factor to kpc. */
-  double lengthtokpc;
-
-  /* Velocity conversion factor to km/s. */
-  double velocitytokms;
-
-  /* Mass conversion factor to solar masses. */
-  double masstosolarmass;
-
-  /* Potential conversion factor. */
-  double energyperunitmass;
-
-  /*! Newton's gravitationl constant (phys_const.const_newton_G)*/
-  double gravity;
-
-  /*! Hubble constant at the current redshift (cosmology.H) */
-  double hubbleunit;
-};
-
-/* Structure to hold the location of a top-level cell. */
-struct cell_loc {
-
-  /* Coordinates x,y,z */
-  double loc[3];
-};
-
-/* Structure for passing simulation information to VELOCIraptor. */
-struct siminfo {
-  double period, zoomhigresolutionmass, interparticlespacing, spacedimension[3];
-
-  /* Number of top-cells. */
-  int numcells;
-
-  /*! Locations of top-level cells. */
-  struct cell_loc *cell_loc;
-
-  /*! Top-level cell width. */
-  double cellwidth[3];
-
-  /*! Inverse of the top-level cell width. */
-  double icellwidth[3];
-
-  int icosmologicalsim;
-};
-
 /* VELOCIraptor wrapper functions. */
 void velociraptor_init(struct engine *e);
-void velociraptor_invoke(struct engine *e);
+void velociraptor_invoke(struct engine *e, const int linked_with_snap);
 
 #endif /* SWIFT_VELOCIRAPTOR_INTERFACE_H */
diff --git a/src/velociraptor_io.h b/src/velociraptor_io.h
new file mode 100644
index 0000000000000000000000000000000000000000..f18398219bfbc5cd6bb58a37b103f29527fa5589
--- /dev/null
+++ b/src/velociraptor_io.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_VELOCIRAPTOR_IO_H
+#define SWIFT_VELOCIRAPTOR_IO_H
+
+/* Config parameters. */
+#include "../config.h"
+
+INLINE static void velociraptor_convert_part_groupID(const struct engine* e,
+                                                     const struct part* p,
+                                                     const struct xpart* xp,
+                                                     long long* ret) {
+  if (p->gpart == NULL)
+    ret[0] = 0.f;
+  else {
+    const ptrdiff_t offset = p->gpart - e->s->gparts;
+    *ret = (e->s->gpart_group_data + offset)->groupID;
+  }
+}
+
+INLINE static void velociraptor_convert_spart_groupID(const struct engine* e,
+                                                      const struct spart* sp,
+                                                      long long* ret) {
+  if (sp->gpart == NULL)
+    ret[0] = 0.f;
+  else {
+    const ptrdiff_t offset = sp->gpart - e->s->gparts;
+    *ret = (e->s->gpart_group_data + offset)->groupID;
+  }
+}
+
+__attribute__((always_inline)) INLINE static int velociraptor_write_parts(
+    const struct part* parts, const struct xpart* xparts,
+    struct io_props* list) {
+
+  list[0] = io_make_output_field_convert_part(
+      "GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, parts, xparts,
+      velociraptor_convert_part_groupID);
+
+  return 1;
+}
+
+__attribute__((always_inline)) INLINE static int velociraptor_write_gparts(
+    const struct velociraptor_gpart_data* group_data, struct io_props* list) {
+
+  list[0] = io_make_output_field("GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS,
+                                 group_data, groupID);
+
+  return 1;
+}
+
+__attribute__((always_inline)) INLINE static int velociraptor_write_sparts(
+    const struct spart* sparts, struct io_props* list) {
+
+  list[0] = io_make_output_field_convert_spart(
+      "GroupID", LONGLONG, 1, UNIT_CONV_NO_UNITS, sparts,
+      velociraptor_convert_spart_groupID);
+
+  return 1;
+}
+
+#endif /* SWIFT_VELOCIRAPTOR_IO_H */
diff --git a/src/velociraptor_struct.h b/src/velociraptor_struct.h
new file mode 100644
index 0000000000000000000000000000000000000000..b998263a6ba2fe0aaa6552f274cb8f4ee85d3b1c
--- /dev/null
+++ b/src/velociraptor_struct.h
@@ -0,0 +1,34 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#ifndef SWIFT_VELOCIRAPTOR_STRUCT_H
+#define SWIFT_VELOCIRAPTOR_STRUCT_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/**
+ * @brief Data returned by VELOCIraptor for each #gpart.
+ */
+struct velociraptor_gpart_data {
+
+  /*! Group ID of that #gpart. */
+  long long groupID;
+};
+
+#endif /* SWIFT_VELOCIRAPTOR_STRUCT_H */
diff --git a/src/version.c b/src/version.c
index 69f70b9aec3549c061c162f2ce183f8fafcc2e9f..6fe8c38fc22f3c06fae42adbba83a65aff208bb9 100644
--- a/src/version.c
+++ b/src/version.c
@@ -27,6 +27,9 @@
 #ifdef HAVE_METIS
 #include <metis.h>
 #endif
+#ifdef HAVE_PARMETIS
+#include <parmetis.h>
+#endif
 #endif
 
 #ifdef HAVE_HDF5
@@ -320,6 +323,23 @@ const char *metis_version(void) {
   return version;
 }
 
+/**
+ * @brief return the ParMETIS version used when SWIFT was built.
+ *
+ * @result description of the ParMETIS version.
+ */
+const char *parmetis_version(void) {
+
+  static char version[256] = {0};
+#if defined(WITH_MPI) && defined(HAVE_PARMETIS)
+  sprintf(version, "%i.%i.%i", PARMETIS_MAJOR_VERSION, PARMETIS_MINOR_VERSION,
+          PARMETIS_SUBMINOR_VERSION);
+#else
+  sprintf(version, "Unknown version");
+#endif
+  return version;
+}
+
 /**
  * @brief return the FFTW version used when SWIFT was built.
  *
@@ -424,6 +444,9 @@ void greetings(void) {
 #ifdef HAVE_METIS
   printf(" METIS library version: %s\n", metis_version());
 #endif
+#ifdef HAVE_PARMETIS
+  printf(" ParMETIS library version: %s\n", parmetis_version());
+#endif
 #endif
   printf("\n");
 }
diff --git a/src/version.h b/src/version.h
index 44119b6a3bbdf57c3f0195bae5ff329d05c61fd5..75a371bd9e47b19c1887b556accd486da50d9cea 100644
--- a/src/version.h
+++ b/src/version.h
@@ -32,6 +32,7 @@ const char* compiler_name(void);
 const char* compiler_version(void);
 const char* mpi_version(void);
 const char* metis_version(void);
+const char* parmetis_version(void);
 const char* hdf5_version(void);
 const char* fftw3_version(void);
 const char* libgsl_version(void);
diff --git a/tests/Makefile.am b/tests/Makefile.am
index d99b68f224f542dcdc60ae59fc6e2042ae20d9b7..f18b6c44c63f6e394bc2b47616f86da5dbcd54e2 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -15,31 +15,33 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 # Add the source directory and the non-standard paths to the included library headers to CFLAGS
-AM_CFLAGS = -I$(top_srcdir)/src $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS)
+AM_CFLAGS = -I$(top_srcdir)/src $(HDF5_CPPFLAGS) $(GSL_INCS) $(FFTW_INCS) $(NUMA_INCS)
 
-AM_LDFLAGS = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
+AM_LDFLAGS = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(FFTW_LIBS) $(NUMA_LIBS) $(TCMALLOC_LIBS) $(JEMALLOC_LIBS) $(TBBMALLOC_LIBS) $(GRACKLE_LIBS) $(GSL_LIBS) $(PROFILER_LIBS)
 
 # List of programs and scripts to run in the test suite
-TESTS = testGreetings testMaths testReading.sh testSingle testKernel testSymmetry \
+TESTS = testGreetings testMaths testReading.sh testSingle testKernel \
         testActivePair.sh test27cells.sh test27cellsPerturbed.sh  \
         testParser.sh testSPHStep test125cells.sh test125cellsPerturbed.sh testFFT \
-        testAdiabaticIndex \
+        testAdiabaticIndex testRandom \
         testMatrixInversion testThreadpool testDump testLogger testInteractions.sh \
         testVoronoi1D testVoronoi2D testVoronoi3D testGravityDerivatives \
 	testPeriodicBC.sh testPeriodicBCPerturbed.sh testPotentialSelf \
 	testPotentialPair testEOS testUtilities testSelectOutput.sh \
-	testCbrt testCosmology testOutputList testFormat.sh
+	testCbrt testCosmology testOutputList testFormat.sh \
+	test27cellsStars.sh test27cellsStarsPerturbed.sh
 
 # List of test programs to compile
 check_PROGRAMS = testGreetings testReading testSingle testTimeIntegration \
 		 testSPHStep testActivePair test27cells test27cells_subset test125cells testParser \
-                 testKernel testFFT testInteractions testMaths \
+                 testKernel testFFT testInteractions testMaths testRandom \
                  testSymmetry testThreadpool \
                  testAdiabaticIndex testRiemannExact testRiemannTRRS \
                  testRiemannHLLC testMatrixInversion testDump testLogger \
 		 testVoronoi1D testVoronoi2D testVoronoi3D testPeriodicBC \
 		 testGravityDerivatives testPotentialSelf testPotentialPair testEOS testUtilities \
-		 testSelectOutput testCbrt testCosmology testOutputList
+		 testSelectOutput testCbrt testCosmology testOutputList test27cellsStars \
+		 test27cellsStars_subset testCooling
 
 # Rebuild tests when SWIFT is updated.
 $(check_PROGRAMS): ../src/.libs/libswiftsim.a
@@ -49,6 +51,8 @@ testGreetings_SOURCES = testGreetings.c
 
 testMaths_SOURCES = testMaths.c
 
+testRandom_SOURCES = testRandom.c
+
 testReading_SOURCES = testReading.c
 
 testSelectOutput_SOURCES = testSelectOutput.c
@@ -76,6 +80,12 @@ test27cells_subset_SOURCES = test27cells.c
 
 test27cells_subset_CFLAGS = $(AM_CFLAGS) -DTEST_DOSELF_SUBSET -DTEST_DOPAIR_SUBSET
 
+test27cellsStars_SOURCES = test27cellsStars.c
+
+test27cellsStars_subset_SOURCES = test27cellsStars.c
+
+test27cellsStars_subset_CFLAGS = $(AM_CFLAGS) -DTEST_DOSELF_SUBSET -DTEST_DOPAIR_SUBSET
+
 testPeriodicBC_SOURCES = testPeriodicBC.c
 
 test125cells_SOURCES = test125cells.c
@@ -120,6 +130,8 @@ testEOS_SOURCES = testEOS.c
 
 testUtilities_SOURCES = testUtilities.c
 
+testCooling_SOURCES = testCooling.c
+
 # Files necessary for distribution
 EXTRA_DIST = testReading.sh makeInput.py testActivePair.sh \
 	     test27cells.sh test27cellsPerturbed.sh testParser.sh testPeriodicBC.sh \
@@ -130,4 +142,6 @@ EXTRA_DIST = testReading.sh makeInput.py testActivePair.sh \
 	     fft_params.yml tolerance_periodic_BC_normal.dat tolerance_periodic_BC_perturbed.dat \
 	     testEOS.sh testEOS_plot.sh testSelectOutput.sh selectOutput.yml \
              output_list_params.yml output_list_time.txt output_list_redshift.txt \
-             output_list_scale_factor.txt
+             output_list_scale_factor.txt testEOS.sh testEOS_plot.sh \
+	     test27cellsStars.sh test27cellsStarsPerturbed.sh star_tolerance_27_normal.dat \
+	     star_tolerance_27_perturbed.dat star_tolerance_27_perturbed_h.dat star_tolerance_27_perturbed_h2.dat
diff --git a/tests/logger.yml b/tests/logger.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eaf8731f0e09df40b891c7b57be35cd9e14fc5cc
--- /dev/null
+++ b/tests/logger.yml
@@ -0,0 +1,5 @@
+# Parameters governing the logger snapshot system
+Logger:
+  delta_step:           10     # (Optional) Update the particle log every this many updates
+  initial_buffer_size:   .1    # buffer size in GB
+  basename:             indice # Common part of the filenames
diff --git a/tests/star_tolerance_27_normal.dat b/tests/star_tolerance_27_normal.dat
new file mode 100644
index 0000000000000000000000000000000000000000..c243da2bcd9f5177ab471b2b3e622bdb1ee677d4
--- /dev/null
+++ b/tests/star_tolerance_27_normal.dat
@@ -0,0 +1,4 @@
+#   ID      pos_x      pos_y      pos_z     wcount     wcount_dh
+    0	    1e-6       1e-6	  1e-6 	    4e-4       1.2e-2
+    0	    1e-6       1e-6	  1e-6 	    1e-4       2e-3
+    0	    1e-6       1e-6	  1e-6 	    1e-6       1e-6
diff --git a/tests/star_tolerance_27_perturbed.dat b/tests/star_tolerance_27_perturbed.dat
new file mode 100644
index 0000000000000000000000000000000000000000..9e6886834e9a793d37dfe77c9713697cc7f6f606
--- /dev/null
+++ b/tests/star_tolerance_27_perturbed.dat
@@ -0,0 +1,4 @@
+#   ID      pos_x      pos_y      pos_z     wcount     wcount_dh
+    0	    1e-6       1e-6	  1e-6 	    2e-4       1e-2
+    0	    1e-6       1e-6	  1e-6 	    1e-5       2.4e-3
+    0	    1e-6       1e-6	  1e-6 	    1e-6       1e-2
diff --git a/tests/star_tolerance_27_perturbed_h.dat b/tests/star_tolerance_27_perturbed_h.dat
new file mode 100644
index 0000000000000000000000000000000000000000..20367e6f09ac171cad17ab5418304bd5674e78d6
--- /dev/null
+++ b/tests/star_tolerance_27_perturbed_h.dat
@@ -0,0 +1,4 @@
+#   ID        pos_x      pos_y        pos_z	      wcount     wcount_dh
+    0	      1e-6       1e-6	      1e-6    	      5e-4       1.4e-2
+    0	      1e-6       1e-6	      1e-6 	      1e-5       4e-3
+    0	      1e-6       1e-6	      1e-6 	      1e-6       1e0
diff --git a/tests/star_tolerance_27_perturbed_h2.dat b/tests/star_tolerance_27_perturbed_h2.dat
new file mode 100644
index 0000000000000000000000000000000000000000..fe89f21dd2fe37360bc0e3a2c5431528075bf2e5
--- /dev/null
+++ b/tests/star_tolerance_27_perturbed_h2.dat
@@ -0,0 +1,4 @@
+#   ID        pos_x      pos_y      pos_z    wcount     wcount_dh
+    0	      1e-6       1e-6	    1e-6     5e-4       1.5e-2
+    0	      1e-6       1e-6	    1e-6     1e-5       5.86e-3
+    0	      1e-6       1e-6	    1e-6     1e-6       1e0
diff --git a/tests/test125cells.c b/tests/test125cells.c
index 2a2c20dbb064539b481e169b49b74389e79a8174..7fc743725a58ed82c815dc97b3a7b89351c3f2c0 100644
--- a/tests/test125cells.c
+++ b/tests/test125cells.c
@@ -31,19 +31,12 @@
 #include "swift.h"
 
 #if defined(WITH_VECTORIZATION)
-#define DOSELF2 runner_doself2_force_vec
-#define DOPAIR2 runner_dopair2_branch_force
 #define DOSELF2_NAME "runner_doself2_force_vec"
 #define DOPAIR2_NAME "runner_dopair2_force_vec"
 #endif
 
-#ifndef DOSELF2
-#define DOSELF2 runner_doself2_force
+#ifndef DOSELF2_NAME
 #define DOSELF2_NAME "runner_doself2_density"
-#endif
-
-#ifndef DOPAIR2
-#define DOPAIR2 runner_dopair2_branch_force
 #define DOPAIR2_NAME "runner_dopair2_force"
 #endif
 
@@ -118,7 +111,8 @@ void set_energy_state(struct part *part, enum pressure_field press, float size,
   part->entropy = pressure / pow_gamma(density);
 #elif defined(DEFAULT_SPH)
   part->u = pressure / (hydro_gamma_minus_one * density);
-#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH)
+#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH) || \
+    defined(HOPKINS_PU_SPH_MONAGHAN) || defined(ANARCHY_PU_SPH)
   part->u = pressure / (hydro_gamma_minus_one * density);
 #elif defined(PLANETARY_SPH)
   part->u = pressure / (hydro_gamma_minus_one * density);
@@ -152,19 +146,19 @@ void get_solution(const struct cell *main_cell, struct solution_part *solution,
                   float density, enum velocity_field vel,
                   enum pressure_field press, float size) {
 
-  for (int i = 0; i < main_cell->count; ++i) {
+  for (int i = 0; i < main_cell->hydro.count; ++i) {
 
-    solution[i].id = main_cell->parts[i].id;
+    solution[i].id = main_cell->hydro.parts[i].id;
 
-    solution[i].x[0] = main_cell->parts[i].x[0];
-    solution[i].x[1] = main_cell->parts[i].x[1];
-    solution[i].x[2] = main_cell->parts[i].x[2];
+    solution[i].x[0] = main_cell->hydro.parts[i].x[0];
+    solution[i].x[1] = main_cell->hydro.parts[i].x[1];
+    solution[i].x[2] = main_cell->hydro.parts[i].x[2];
 
-    solution[i].v[0] = main_cell->parts[i].v[0];
-    solution[i].v[1] = main_cell->parts[i].v[1];
-    solution[i].v[2] = main_cell->parts[i].v[2];
+    solution[i].v[0] = main_cell->hydro.parts[i].v[0];
+    solution[i].v[1] = main_cell->hydro.parts[i].v[1];
+    solution[i].v[2] = main_cell->hydro.parts[i].v[2];
 
-    solution[i].h = main_cell->parts[i].h;
+    solution[i].h = main_cell->hydro.parts[i].h;
 
     solution[i].rho = density;
 
@@ -213,9 +207,9 @@ void reset_particles(struct cell *c, struct hydro_space *hs,
                      enum velocity_field vel, enum pressure_field press,
                      float size, float density) {
 
-  for (int i = 0; i < c->count; ++i) {
+  for (int i = 0; i < c->hydro.count; ++i) {
 
-    struct part *p = &c->parts[i];
+    struct part *p = &c->hydro.parts[i];
 
     set_velocity(p, vel, size);
     set_energy_state(p, press, size, density);
@@ -272,20 +266,20 @@ struct cell *make_cell(size_t n, const double offset[3], double size, double h,
   struct cell *cell = (struct cell *)malloc(sizeof(struct cell));
   bzero(cell, sizeof(struct cell));
 
-  if (posix_memalign((void **)&cell->parts, part_align,
+  if (posix_memalign((void **)&cell->hydro.parts, part_align,
                      count * sizeof(struct part)) != 0)
     error("couldn't allocate particles, no. of particles: %d", (int)count);
-  if (posix_memalign((void **)&cell->xparts, xpart_align,
+  if (posix_memalign((void **)&cell->hydro.xparts, xpart_align,
                      count * sizeof(struct xpart)) != 0)
     error("couldn't allocate particles, no. of x-particles: %d", (int)count);
-  bzero(cell->parts, count * sizeof(struct part));
-  bzero(cell->xparts, count * sizeof(struct xpart));
+  bzero(cell->hydro.parts, count * sizeof(struct part));
+  bzero(cell->hydro.xparts, count * sizeof(struct xpart));
 
   float h_max = 0.f;
 
   /* Construct the parts */
-  struct part *part = cell->parts;
-  struct xpart *xpart = cell->xparts;
+  struct part *part = cell->hydro.parts;
+  struct xpart *xpart = cell->hydro.xparts;
   for (size_t x = 0; x < n; ++x) {
     for (size_t y = 0; y < n; ++y) {
       for (size_t z = 0; z < n; ++z) {
@@ -346,11 +340,11 @@ struct cell *make_cell(size_t n, const double offset[3], double size, double h,
 
   /* Cell properties */
   cell->split = 0;
-  cell->h_max = h_max;
-  cell->count = count;
-  cell->gcount = 0;
-  cell->dx_max_part = 0.;
-  cell->dx_max_sort = 0.;
+  cell->hydro.h_max = h_max;
+  cell->hydro.count = count;
+  cell->grav.count = 0;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
   cell->width[0] = size;
   cell->width[1] = size;
   cell->width[2] = size;
@@ -358,24 +352,24 @@ struct cell *make_cell(size_t n, const double offset[3], double size, double h,
   cell->loc[1] = offset[1];
   cell->loc[2] = offset[2];
 
-  cell->ti_old_part = 8;
-  cell->ti_hydro_end_min = 8;
-  cell->ti_hydro_end_max = 8;
+  cell->hydro.ti_old_part = 8;
+  cell->hydro.ti_end_min = 8;
+  cell->hydro.ti_end_max = 8;
   cell->nodeID = NODE_ID;
 
-  // shuffle_particles(cell->parts, cell->count);
+  // shuffle_particles(cell->hydro.parts, cell->hydro.count);
 
-  cell->sorted = 0;
-  for (int k = 0; k < 13; k++) cell->sort[k] = NULL;
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
 
   return cell;
 }
 
 void clean_up(struct cell *ci) {
-  free(ci->parts);
-  free(ci->xparts);
+  free(ci->hydro.parts);
+  free(ci->hydro.xparts);
   for (int k = 0; k < 13; k++)
-    if (ci->sort[k] != NULL) free(ci->sort[k]);
+    if (ci->hydro.sort[k] != NULL) free(ci->hydro.sort[k]);
   free(ci);
 }
 
@@ -397,37 +391,48 @@ void dump_particle_fields(char *fileName, struct cell *main_cell,
   fprintf(file, "# Main cell --------------------------------------------\n");
 
   /* Write main cell */
-  for (int pid = 0; pid < main_cell->count; pid++) {
+  for (int pid = 0; pid < main_cell->hydro.count; pid++) {
     fprintf(file,
             "%6llu %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f "
             "%8.5f "
             "%8.5f %8.5f %13e %13e %13e %13e %13e %8.5f %8.5f\n",
-            main_cell->parts[pid].id, main_cell->parts[pid].x[0],
-            main_cell->parts[pid].x[1], main_cell->parts[pid].x[2],
-            main_cell->parts[pid].v[0], main_cell->parts[pid].v[1],
-            main_cell->parts[pid].v[2], main_cell->parts[pid].h,
-            hydro_get_comoving_density(&main_cell->parts[pid]),
-#if defined(MINIMAL_SPH) || defined(PLANETARY_SPH) ||   \
-    defined(GIZMO_MFV_SPH) || defined(SHADOWFAX_SPH) || \
-    defined(HOPKINS_PU_SPH)
+            main_cell->hydro.parts[pid].id, main_cell->hydro.parts[pid].x[0],
+            main_cell->hydro.parts[pid].x[1], main_cell->hydro.parts[pid].x[2],
+            main_cell->hydro.parts[pid].v[0], main_cell->hydro.parts[pid].v[1],
+            main_cell->hydro.parts[pid].v[2], main_cell->hydro.parts[pid].h,
+            hydro_get_comoving_density(&main_cell->hydro.parts[pid]),
+#if defined(MINIMAL_SPH) || defined(PLANETARY_SPH) ||   \
+    defined(GIZMO_MFV_SPH) || defined(SHADOWFAX_SPH) || \
+    defined(HOPKINS_PU_SPH) ||                          \
+    defined(HOPKINS_PU_SPH_MONAGHAN)
             0.f,
+#elif defined(ANARCHY_PU_SPH)
+            main_cell->hydro.parts[pid].viscosity.div_v,
 #else
-            main_cell->parts[pid].density.div_v,
+            main_cell->hydro.parts[pid].density.div_v,
 #endif
-            hydro_get_comoving_entropy(&main_cell->parts[pid]),
-            hydro_get_comoving_internal_energy(&main_cell->parts[pid]),
-            hydro_get_comoving_pressure(&main_cell->parts[pid]),
-            hydro_get_comoving_soundspeed(&main_cell->parts[pid]),
-            main_cell->parts[pid].a_hydro[0], main_cell->parts[pid].a_hydro[1],
-            main_cell->parts[pid].a_hydro[2], main_cell->parts[pid].force.h_dt,
+            hydro_get_drifted_comoving_entropy(&main_cell->hydro.parts[pid]),
+            hydro_get_drifted_comoving_internal_energy(
+                &main_cell->hydro.parts[pid]),
+            hydro_get_comoving_pressure(&main_cell->hydro.parts[pid]),
+            hydro_get_comoving_soundspeed(&main_cell->hydro.parts[pid]),
+            main_cell->hydro.parts[pid].a_hydro[0],
+            main_cell->hydro.parts[pid].a_hydro[1],
+            main_cell->hydro.parts[pid].a_hydro[2],
+            main_cell->hydro.parts[pid].force.h_dt,
 #if defined(GADGET2_SPH)
-            main_cell->parts[pid].force.v_sig, main_cell->parts[pid].entropy_dt,
-            0.f
+            main_cell->hydro.parts[pid].force.v_sig,
+            main_cell->hydro.parts[pid].entropy_dt, 0.f
 #elif defined(DEFAULT_SPH)
-            main_cell->parts[pid].force.v_sig, 0.f,
-            main_cell->parts[pid].force.u_dt
-#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH)
-            main_cell->parts[pid].force.v_sig, 0.f, main_cell->parts[pid].u_dt
+            main_cell->hydro.parts[pid].force.v_sig, 0.f,
+            main_cell->hydro.parts[pid].force.u_dt
+#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH) || \
+    defined(HOPKINS_PU_SPH_MONAGHAN)
+            main_cell->hydro.parts[pid].force.v_sig, 0.f,
+            main_cell->hydro.parts[pid].u_dt
+#elif defined(ANARCHY_PU_SPH)
+            main_cell->hydro.parts[pid].viscosity.v_sig, 0.f,
+            main_cell->hydro.parts[pid].u_dt
 #else
             0.f, 0.f, 0.f
 #endif
@@ -438,7 +443,7 @@ void dump_particle_fields(char *fileName, struct cell *main_cell,
 
     fprintf(file, "# Solution ---------------------------------------------\n");
 
-    for (int pid = 0; pid < main_cell->count; pid++) {
+    for (int pid = 0; pid < main_cell->hydro.count; pid++) {
       fprintf(file,
               "%6llu %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f "
               "%8.5f %8.5f "
@@ -460,9 +465,15 @@ void dump_particle_fields(char *fileName, struct cell *main_cell,
 /* Just a forward declaration... */
 void runner_dopair1_branch_density(struct runner *r, struct cell *ci,
                                    struct cell *cj);
-void runner_doself1_density(struct runner *r, struct cell *ci);
+void runner_doself1_branch_density(struct runner *r, struct cell *ci);
+#ifdef EXTRA_HYDRO_LOOP
+void runner_dopair1_branch_gradient(struct runner *r, struct cell *ci,
+                                    struct cell *cj);
+void runner_doself1_branch_gradient(struct runner *r, struct cell *ci);
+#endif /* EXTRA_HYDRO_LOOP */
 void runner_dopair2_branch_force(struct runner *r, struct cell *ci,
                                  struct cell *cj);
+void runner_doself2_branch_force(struct runner *r, struct cell *ci);
 void runner_doself2_force(struct runner *r, struct cell *ci);
 void runner_doself2_force_vec(struct runner *r, struct cell *ci);
 
@@ -586,10 +597,13 @@ int main(int argc, char *argv[]) {
   prog_const.const_newton_G = 1.f;
 
   struct hydro_props hp;
+  hydro_props_init_no_hydro(&hp);
   hp.eta_neighbours = h;
   hp.h_tolerance = 1e0;
   hp.h_max = FLT_MAX;
-  hp.max_smoothing_iterations = 1;
+  hp.h_min = 0.f;
+  hp.h_min_ratio = 0.f;
+  hp.max_smoothing_iterations = 10;
   hp.CFL_condition = 0.1;
 
   struct engine engine;
@@ -640,7 +654,7 @@ int main(int argc, char *argv[]) {
 
   /* Construct the real solution */
   struct solution_part *solution = (struct solution_part *)malloc(
-      main_cell->count * sizeof(struct solution_part));
+      main_cell->hydro.count * sizeof(struct solution_part));
   get_solution(main_cell, solution, rho, vel, press, size);
 
   ticks timings[27];
@@ -657,26 +671,25 @@ int main(int argc, char *argv[]) {
 
     /* Reset particles. */
     for (int i = 0; i < 125; ++i) {
-      for (int pid = 0; pid < cells[i]->count; ++pid)
-        hydro_init_part(&cells[i]->parts[pid], &space.hs);
+      for (int pid = 0; pid < cells[i]->hydro.count; ++pid)
+        hydro_init_part(&cells[i]->hydro.parts[pid], &space.hs);
     }
 
     /* First, sort stuff */
     for (int j = 0; j < 125; ++j)
-      runner_do_sort(&runner, cells[j], 0x1FFF, 0, 0);
+      runner_do_hydro_sort(&runner, cells[j], 0x1FFF, 0, 0);
 
-/* Do the density calculation */
-#if !(defined(MINIMAL_SPH) && defined(WITH_VECTORIZATION))
+      /* Do the density calculation */
 
 /* Initialise the particle cache. */
 #ifdef WITH_VECTORIZATION
     runner.ci_cache.count = 0;
-    cache_init(&runner.ci_cache, 512);
     runner.cj_cache.count = 0;
+    cache_init(&runner.ci_cache, 512);
     cache_init(&runner.cj_cache, 512);
 #endif
 
-    /* Run all the pairs (only once !)*/
+    /* Run all the pairs (only once !)*/
     for (int i = 0; i < 5; i++) {
       for (int j = 0; j < 5; j++) {
         for (int k = 0; k < 5; k++) {
@@ -708,20 +721,62 @@ int main(int argc, char *argv[]) {
 
     /* And now the self-interaction for the central cells*/
     for (int j = 0; j < 27; ++j)
-      runner_doself1_density(&runner, inner_cells[j]);
-
-#endif
+      runner_doself1_branch_density(&runner, inner_cells[j]);
 
     /* Ghost to finish everything on the central cells */
     for (int j = 0; j < 27; ++j) runner_do_ghost(&runner, inner_cells[j], 0);
 
-/* Do the force calculation */
-#if !(defined(MINIMAL_SPH) && defined(WITH_VECTORIZATION))
+#ifdef EXTRA_HYDRO_LOOP
+    /* We need to do the gradient loop and the extra ghost! */
+    message(
+        "Extra hydro loop detected, running gradient loop in test125cells.");
+
+    /* Run all the pairs (only once !)*/
+    for (int i = 0; i < 5; i++) {
+      for (int j = 0; j < 5; j++) {
+        for (int k = 0; k < 5; k++) {
+
+          struct cell *ci = cells[i * 25 + j * 5 + k];
+
+          for (int ii = -1; ii < 2; ii++) {
+            int iii = i + ii;
+            if (iii < 0 || iii >= 5) continue;
+            iii = (iii + 5) % 5;
+            for (int jj = -1; jj < 2; jj++) {
+              int jjj = j + jj;
+              if (jjj < 0 || jjj >= 5) continue;
+              jjj = (jjj + 5) % 5;
+              for (int kk = -1; kk < 2; kk++) {
+                int kkk = k + kk;
+                if (kkk < 0 || kkk >= 5) continue;
+                kkk = (kkk + 5) % 5;
+
+                struct cell *cj = cells[iii * 25 + jjj * 5 + kkk];
+
+                if (cj > ci) runner_dopair1_branch_gradient(&runner, ci, cj);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    /* And now the self-interaction for the central cells */
+    for (int j = 0; j < 27; ++j)
+      runner_doself1_branch_gradient(&runner, inner_cells[j]);
+
+    /* Extra ghost to finish everything on the central cells */
+    for (int j = 0; j < 27; ++j)
+      runner_do_extra_ghost(&runner, inner_cells[j], 0);
+
+#endif /* EXTRA_HYDRO_LOOP */
+
+      /* Do the force calculation */
 
 #ifdef WITH_VECTORIZATION
     /* Initialise the cache. */
-    runner.ci_cache.count = 0;
-    runner.cj_cache.count = 0;
+    cache_clean(&runner.ci_cache);
+    cache_clean(&runner.cj_cache);
     cache_init(&runner.ci_cache, 512);
     cache_init(&runner.cj_cache, 512);
 #endif
@@ -738,7 +793,7 @@ int main(int argc, char *argv[]) {
 
             const ticks sub_tic = getticks();
 
-            DOPAIR2(&runner, main_cell, cj);
+            runner_dopair2_branch_force(&runner, main_cell, cj);
 
             timings[ctr++] += getticks() - sub_tic;
           }
@@ -749,13 +804,12 @@ int main(int argc, char *argv[]) {
     ticks self_tic = getticks();
 
     /* And now the self-interaction for the main cell */
-    DOSELF2(&runner, main_cell);
+    runner_doself2_branch_force(&runner, main_cell);
 
     timings[26] += getticks() - self_tic;
-#endif
 
     /* Finally, give a gentle kick */
-    runner_do_end_force(&runner, main_cell, 0);
+    runner_do_end_hydro_force(&runner, main_cell, 0);
     const ticks toc = getticks();
     time += toc - tic;
 
@@ -767,8 +821,8 @@ int main(int argc, char *argv[]) {
     }
 
     for (int i = 0; i < 125; ++i) {
-      for (int pid = 0; pid < cells[i]->count; ++pid)
-        hydro_init_part(&cells[i]->parts[pid], &space.hs);
+      for (int pid = 0; pid < cells[i]->hydro.count; ++pid)
+        hydro_init_part(&cells[i]->hydro.parts[pid], &space.hs);
     }
   }
 
@@ -785,11 +839,11 @@ int main(int argc, char *argv[]) {
 
   ticks self_time = timings[26];
 
-  message("Corner calculations took       : %15lli ticks.", corner_time / runs);
-  message("Edge calculations took         : %15lli ticks.", edge_time / runs);
-  message("Face calculations took         : %15lli ticks.", face_time / runs);
-  message("Self calculations took         : %15lli ticks.", self_time / runs);
-  message("SWIFT calculation took         : %15lli ticks.", time / runs);
+  message("Corner calculations took:     %15lli ticks.", corner_time / runs);
+  message("Edge calculations took:       %15lli ticks.", edge_time / runs);
+  message("Face calculations took:       %15lli ticks.", face_time / runs);
+  message("Self calculations took:       %15lli ticks.", self_time / runs);
+  message("SWIFT calculation took:       %15lli ticks.", time / runs);
 
   for (int j = 0; j < 125; ++j)
     reset_particles(cells[j], &space.hs, vel, press, size, rho);
@@ -798,18 +852,17 @@ int main(int argc, char *argv[]) {
 
   const ticks tic = getticks();
 
-/* Kick the central cell */
-// runner_do_kick1(&runner, main_cell, 0);
+  /* Kick the central cell */
+  // runner_do_kick1(&runner, main_cell, 0);
 
-/* And drift it */
-// runner_do_drift_particles(&runner, main_cell, 0);
+  /* And drift it */
+  // runner_do_drift_particles(&runner, main_cell, 0);
 
-/* Initialise the particles */
-// for (int j = 0; j < 125; ++j) runner_do_drift_particles(&runner, cells[j],
-// 0);
+  /* Initialise the particles */
+  // for (int j = 0; j < 125; ++j) runner_do_drift_particles(&runner, cells[j],
+  // 0);
 
-/* Do the density calculation */
-#if !(defined(MINIMAL_SPH) && defined(WITH_VECTORIZATION))
+  /* Do the density calculation */
 
   /* Run all the pairs (only once !)*/
   for (int i = 0; i < 5; i++) {
@@ -844,13 +897,52 @@ int main(int argc, char *argv[]) {
   /* And now the self-interaction for the central cells*/
   for (int j = 0; j < 27; ++j) self_all_density(&runner, inner_cells[j]);
 
-#endif
-
   /* Ghost to finish everything on the central cells */
   for (int j = 0; j < 27; ++j) runner_do_ghost(&runner, inner_cells[j], 0);
 
-/* Do the force calculation */
-#if !(defined(MINIMAL_SPH) && defined(WITH_VECTORIZATION))
+#ifdef EXTRA_HYDRO_LOOP
+  /* We need to do the gradient loop and the extra ghost! */
+
+  /* Run all the pairs (only once !)*/
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 5; j++) {
+      for (int k = 0; k < 5; k++) {
+
+        struct cell *ci = cells[i * 25 + j * 5 + k];
+
+        for (int ii = -1; ii < 2; ii++) {
+          int iii = i + ii;
+          if (iii < 0 || iii >= 5) continue;
+          iii = (iii + 5) % 5;
+          for (int jj = -1; jj < 2; jj++) {
+            int jjj = j + jj;
+            if (jjj < 0 || jjj >= 5) continue;
+            jjj = (jjj + 5) % 5;
+            for (int kk = -1; kk < 2; kk++) {
+              int kkk = k + kk;
+              if (kkk < 0 || kkk >= 5) continue;
+              kkk = (kkk + 5) % 5;
+
+              struct cell *cj = cells[iii * 25 + jjj * 5 + kkk];
+
+              if (cj > ci) pairs_all_gradient(&runner, ci, cj);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /* And now the self-interaction for the central cells */
+  for (int j = 0; j < 27; ++j) self_all_gradient(&runner, inner_cells[j]);
+
+  /* Extra ghost to finish everything on the central cells */
+  for (int j = 0; j < 27; ++j)
+    runner_do_extra_ghost(&runner, inner_cells[j], 0);
+
+#endif /* EXTRA_HYDRO_LOOP */
+
+  /* Do the force calculation */
 
   /* Do the pairs (for the central 27 cells) */
   for (int i = 1; i < 4; i++) {
@@ -867,16 +959,14 @@ int main(int argc, char *argv[]) {
   /* And now the self-interaction for the main cell */
   self_all_force(&runner, main_cell);
 
-#endif
-
   /* Finally, give a gentle kick */
-  runner_do_end_force(&runner, main_cell, 0);
+  runner_do_end_hydro_force(&runner, main_cell, 0);
   // runner_do_kick2(&runner, main_cell, 0);
 
   const ticks toc = getticks();
 
   /* Output timing */
-  message("Brute force calculation took : %15lli ticks.", toc - tic);
+  message("Brute force calculation took: %15lli ticks.", toc - tic);
 
   sprintf(outputFileName, "brute_force_125_%.150s.dat",
           outputFileNameExtension);
@@ -886,5 +976,10 @@ int main(int argc, char *argv[]) {
   for (int i = 0; i < 125; ++i) clean_up(cells[i]);
   free(solution);
 
+#ifdef WITH_VECTORIZATION
+  cache_clean(&runner.ci_cache);
+  cache_clean(&runner.cj_cache);
+#endif
+
   return 0;
 }
diff --git a/tests/test27cells.c b/tests/test27cells.c
index 1ca6b2c54d901943b0cc748a2241a3a2f9ae9244..cc34f503304feb56799a2d31baa3416b940202d3 100644
--- a/tests/test27cells.c
+++ b/tests/test27cells.c
@@ -101,14 +101,14 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   struct cell *cell = (struct cell *)malloc(sizeof(struct cell));
   bzero(cell, sizeof(struct cell));
 
-  if (posix_memalign((void **)&cell->parts, part_align,
+  if (posix_memalign((void **)&cell->hydro.parts, part_align,
                      count * sizeof(struct part)) != 0) {
     error("couldn't allocate particles, no. of particles: %d", (int)count);
   }
-  bzero(cell->parts, count * sizeof(struct part));
+  bzero(cell->hydro.parts, count * sizeof(struct part));
 
   /* Construct the parts */
-  struct part *part = cell->parts;
+  struct part *part = cell->hydro.parts;
   for (size_t x = 0; x < n; ++x) {
     for (size_t y = 0; y < n; ++y) {
       for (size_t z = 0; z < n; ++z) {
@@ -182,10 +182,10 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
 
   /* Cell properties */
   cell->split = 0;
-  cell->h_max = h_max;
-  cell->count = count;
-  cell->dx_max_part = 0.;
-  cell->dx_max_sort = 0.;
+  cell->hydro.h_max = h_max;
+  cell->hydro.count = count;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
   cell->width[0] = size;
   cell->width[1] = size;
   cell->width[2] = size;
@@ -193,23 +193,23 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   cell->loc[1] = offset[1];
   cell->loc[2] = offset[2];
 
-  cell->ti_old_part = 8;
-  cell->ti_hydro_end_min = 8;
-  cell->ti_hydro_end_max = 8;
+  cell->hydro.ti_old_part = 8;
+  cell->hydro.ti_end_min = 8;
+  cell->hydro.ti_end_max = 8;
   cell->nodeID = NODE_ID;
 
-  shuffle_particles(cell->parts, cell->count);
+  shuffle_particles(cell->hydro.parts, cell->hydro.count);
 
-  cell->sorted = 0;
-  for (int k = 0; k < 13; k++) cell->sort[k] = NULL;
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
 
   return cell;
 }
 
 void clean_up(struct cell *ci) {
-  free(ci->parts);
+  free(ci->hydro.parts);
   for (int k = 0; k < 13; k++)
-    if (ci->sort[k] != NULL) free(ci->sort[k]);
+    if (ci->hydro.sort[k] != NULL) free(ci->hydro.sort[k]);
   free(ci);
 }
 
@@ -229,8 +229,8 @@ void zero_particle_fields(struct cell *c) {
 #else
   struct hydro_space *hspointer = NULL;
 #endif
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_init_part(&c->parts[pid], hspointer);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_init_part(&c->hydro.parts[pid], hspointer);
   }
 }
 
@@ -238,12 +238,12 @@ void zero_particle_fields(struct cell *c) {
  * @brief Ends the loop by adding the appropriate coefficients
  */
 void end_calculation(struct cell *c, const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_end_density(&c->parts[pid], cosmo);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_end_density(&c->hydro.parts[pid], cosmo);
 
     /* Recover the common "Neighbour number" definition */
-    c->parts[pid].density.wcount *= pow_dimension(c->parts[pid].h);
-    c->parts[pid].density.wcount *= kernel_norm;
+    c->hydro.parts[pid].density.wcount *= pow_dimension(c->hydro.parts[pid].h);
+    c->hydro.parts[pid].density.wcount *= kernel_norm;
   }
 }
 
@@ -264,30 +264,37 @@ void dump_particle_fields(char *fileName, struct cell *main_cell,
   fprintf(file, "# Main cell --------------------------------------------\n");
 
   /* Write main cell */
-  for (int pid = 0; pid < main_cell->count; pid++) {
+  for (int pid = 0; pid < main_cell->hydro.count; pid++) {
     fprintf(file,
             "%6llu %10f %10f %10f %10f %10f %10f %13e %13e %13e %13e %13e "
             "%13e %13e %13e\n",
-            main_cell->parts[pid].id, main_cell->parts[pid].x[0],
-            main_cell->parts[pid].x[1], main_cell->parts[pid].x[2],
-            main_cell->parts[pid].v[0], main_cell->parts[pid].v[1],
-            main_cell->parts[pid].v[2],
-            hydro_get_comoving_density(&main_cell->parts[pid]),
+            main_cell->hydro.parts[pid].id, main_cell->hydro.parts[pid].x[0],
+            main_cell->hydro.parts[pid].x[1], main_cell->hydro.parts[pid].x[2],
+            main_cell->hydro.parts[pid].v[0], main_cell->hydro.parts[pid].v[1],
+            main_cell->hydro.parts[pid].v[2],
+            hydro_get_comoving_density(&main_cell->hydro.parts[pid]),
 #if defined(GIZMO_MFV_SPH) || defined(SHADOWFAX_SPH)
             0.f,
-#elif defined(HOPKINS_PU_SPH)
-            main_cell->parts[pid].density.pressure_bar_dh,
+#elif defined(HOPKINS_PU_SPH) || defined(HOPKINS_PU_SPH_MONAGHAN) || \
+    defined(ANARCHY_PU_SPH)
+            main_cell->hydro.parts[pid].density.pressure_bar_dh,
 #else
-            main_cell->parts[pid].density.rho_dh,
+            main_cell->hydro.parts[pid].density.rho_dh,
 #endif
-            main_cell->parts[pid].density.wcount,
-            main_cell->parts[pid].density.wcount_dh,
+            main_cell->hydro.parts[pid].density.wcount,
+            main_cell->hydro.parts[pid].density.wcount_dh,
 #if defined(GADGET2_SPH) || defined(DEFAULT_SPH) || defined(HOPKINS_PE_SPH) || \
-    defined(HOPKINS_PU_SPH)
-            main_cell->parts[pid].density.div_v,
-            main_cell->parts[pid].density.rot_v[0],
-            main_cell->parts[pid].density.rot_v[1],
-            main_cell->parts[pid].density.rot_v[2]
+    defined(HOPKINS_PU_SPH) || defined(HOPKINS_PU_SPH_MONAGHAN)
+            main_cell->hydro.parts[pid].density.div_v,
+            main_cell->hydro.parts[pid].density.rot_v[0],
+            main_cell->hydro.parts[pid].density.rot_v[1],
+            main_cell->hydro.parts[pid].density.rot_v[2]
+#elif defined(ANARCHY_PU_SPH)
+            /* this is required because of the variable AV scheme */
+            main_cell->hydro.parts[pid].viscosity.div_v,
+            main_cell->hydro.parts[pid].density.rot_v[0],
+            main_cell->hydro.parts[pid].density.rot_v[1],
+            main_cell->hydro.parts[pid].density.rot_v[2]
 #else
             0., 0., 0., 0.
 #endif
@@ -305,23 +312,34 @@ void dump_particle_fields(char *fileName, struct cell *main_cell,
                 "# Offset: [%2d %2d %2d] -----------------------------------\n",
                 i - 1, j - 1, k - 1);
 
-        for (int pjd = 0; pjd < cj->count; pjd++) {
+        for (int pjd = 0; pjd < cj->hydro.count; pjd++) {
           fprintf(
               file,
               "%6llu %10f %10f %10f %10f %10f %10f %13e %13e %13e %13e %13e "
               "%13e %13e %13e\n",
-              cj->parts[pjd].id, cj->parts[pjd].x[0], cj->parts[pjd].x[1],
-              cj->parts[pjd].x[2], cj->parts[pjd].v[0], cj->parts[pjd].v[1],
-              cj->parts[pjd].v[2], hydro_get_comoving_density(&cj->parts[pjd]),
+              cj->hydro.parts[pjd].id, cj->hydro.parts[pjd].x[0],
+              cj->hydro.parts[pjd].x[1], cj->hydro.parts[pjd].x[2],
+              cj->hydro.parts[pjd].v[0], cj->hydro.parts[pjd].v[1],
+              cj->hydro.parts[pjd].v[2],
+              hydro_get_comoving_density(&cj->hydro.parts[pjd]),
 #if defined(GIZMO_MFV_SPH) || defined(SHADOWFAX_SPH)
               0.f,
 #else
-              main_cell->parts[pjd].density.rho_dh,
+              cj->hydro.parts[pjd].density.rho_dh,
 #endif
-              cj->parts[pjd].density.wcount, cj->parts[pjd].density.wcount_dh,
+              cj->hydro.parts[pjd].density.wcount,
+              cj->hydro.parts[pjd].density.wcount_dh,
 #if defined(GADGET2_SPH) || defined(DEFAULT_SPH) || defined(HOPKINS_PE_SPH)
-              cj->parts[pjd].density.div_v, cj->parts[pjd].density.rot_v[0],
-              cj->parts[pjd].density.rot_v[1], cj->parts[pjd].density.rot_v[2]
+              cj->hydro.parts[pjd].density.div_v,
+              cj->hydro.parts[pjd].density.rot_v[0],
+              cj->hydro.parts[pjd].density.rot_v[1],
+              cj->hydro.parts[pjd].density.rot_v[2]
+#elif defined(ANARCHY_PU_SPH)
+              /* this is required because of the variable AV scheme */
+              cj->hydro.parts[pjd].viscosity.div_v,
+              cj->hydro.parts[pjd].density.rot_v[0],
+              cj->hydro.parts[pjd].density.rot_v[1],
+              cj->hydro.parts[pjd].density.rot_v[2]
 #else
               0., 0., 0., 0.
 #endif
@@ -451,6 +469,7 @@ int main(int argc, char *argv[]) {
   space.dim[2] = 3.;
 
   struct hydro_props hp;
+  hydro_props_init_no_hydro(&hp);
   hp.eta_neighbours = h;
   hp.h_tolerance = 1e0;
   hp.h_max = FLT_MAX;
@@ -486,7 +505,7 @@ int main(int argc, char *argv[]) {
 
         runner_do_drift_part(&runner, cells[i * 9 + j * 3 + k], 0);
 
-        runner_do_sort(&runner, cells[i * 9 + j * 3 + k], 0x1FFF, 0, 0);
+        runner_do_hydro_sort(&runner, cells[i * 9 + j * 3 + k], 0x1FFF, 0, 0);
       }
     }
   }
@@ -514,10 +533,10 @@ int main(int argc, char *argv[]) {
 #if defined(TEST_DOSELF_SUBSET) || defined(TEST_DOPAIR_SUBSET)
     int *pid = NULL;
     int count = 0;
-    if ((pid = (int *)malloc(sizeof(int) * main_cell->count)) == NULL)
+    if ((pid = (int *)malloc(sizeof(int) * main_cell->hydro.count)) == NULL)
       error("Can't allocate memory for pid.");
-    for (int k = 0; k < main_cell->count; k++)
-      if (part_is_active(&main_cell->parts[k], &engine)) {
+    for (int k = 0; k < main_cell->hydro.count; k++)
+      if (part_is_active(&main_cell->hydro.parts[k], &engine)) {
         pid[count] = k;
         ++count;
       }
@@ -529,7 +548,7 @@ int main(int argc, char *argv[]) {
         const ticks sub_tic = getticks();
 
 #ifdef TEST_DOPAIR_SUBSET
-        DOPAIR1_SUBSET(&runner, main_cell, main_cell->parts, pid, count,
+        DOPAIR1_SUBSET(&runner, main_cell, main_cell->hydro.parts, pid, count,
                        cells[j]);
 #else
         DOPAIR1(&runner, main_cell, cells[j]);
@@ -543,7 +562,7 @@ int main(int argc, char *argv[]) {
     const ticks self_tic = getticks();
 
 #ifdef TEST_DOSELF_SUBSET
-    DOSELF1_SUBSET(&runner, main_cell, main_cell->parts, pid, count);
+    DOSELF1_SUBSET(&runner, main_cell, main_cell->hydro.parts, pid, count);
 #else
     DOSELF1(&runner, main_cell);
 #endif
@@ -610,5 +629,10 @@ int main(int argc, char *argv[]) {
   /* Clean things to make the sanitizer happy ... */
   for (int i = 0; i < 27; ++i) clean_up(cells[i]);
 
+#ifdef WITH_VECTORIZATION
+  cache_clean(&runner.ci_cache);
+  cache_clean(&runner.cj_cache);
+#endif
+
   return 0;
 }
diff --git a/tests/test27cellsStars.c b/tests/test27cellsStars.c
new file mode 100644
index 0000000000000000000000000000000000000000..e7e1b64b1cc99d8a51cf380fde1560bdd634ae20
--- /dev/null
+++ b/tests/test27cellsStars.c
@@ -0,0 +1,546 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2015 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Some standard headers. */
+#include <fenv.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/* Local headers. */
+#include "swift.h"
+
+#define DOSELF1 runner_doself_branch_stars_density
+#define DOSELF1_SUBSET runner_doself_subset_branch_stars_density
+#ifdef TEST_DOSELF_SUBSET
+#define DOSELF1_NAME "runner_doself_subset_branch_stars_density"
+#else
+#define DOSELF1_NAME "runner_doself1_branch_stars_density"
+#endif
+
+#define DOPAIR1 runner_dopair_branch_stars_density
+#define DOPAIR1_SUBSET runner_dopair_subset_branch_stars_density
+#ifdef TEST_DOPAIR_SUBSET
+#define DOPAIR1_NAME "runner_dopair_subset_branch_stars_density"
+#else
+#define DOPAIR1_NAME "runner_dopair_branch_stars_density"
+#endif
+
+#define NODE_ID 0
+
+/**
+ * @brief Constructs a cell and all of its particles in a valid state prior to
+ * a DOPAIR or DOSELF calculation.
+ *
+ * @param n The cube root of the number of particles.
+ * @param n_stars The cube root of the number of star particles.
+ * @param offset The position of the cell offset from (0,0,0).
+ * @param size The cell size.
+ * @param h The smoothing length of the particles in units of the inter-particle
+ * separation.
+ * @param partId The running counter of IDs for gas.
+ * @param spartId The running counter of IDs for stars.
+ * @param pert The perturbation to apply to the particles in the cell in units
+ * of the inter-particle separation.
+ * @param h_pert The perturbation to apply to the smoothing length.
+ */
+struct cell *make_cell(size_t n, size_t n_stars, double *offset, double size,
+                       double h, long long *partId, long long *spartId,
+                       double pert, double h_pert) {
+  const size_t count = n * n * n;
+  const size_t scount = n_stars * n_stars * n_stars;
+  float h_max = 0.f;
+  struct cell *cell = (struct cell *)malloc(sizeof(struct cell));
+  bzero(cell, sizeof(struct cell));
+
+  if (posix_memalign((void **)&cell->hydro.parts, part_align,
+                     count * sizeof(struct part)) != 0) {
+    error("couldn't allocate particles, no. of particles: %d", (int)count);
+  }
+  bzero(cell->hydro.parts, count * sizeof(struct part));
+
+  /* Construct the parts */
+  struct part *part = cell->hydro.parts;
+  for (size_t x = 0; x < n; ++x) {
+    for (size_t y = 0; y < n; ++y) {
+      for (size_t z = 0; z < n; ++z) {
+        part->x[0] =
+            offset[0] +
+            size * (x + 0.5 + random_uniform(-0.5, 0.5) * pert) / (float)n;
+        part->x[1] =
+            offset[1] +
+            size * (y + 0.5 + random_uniform(-0.5, 0.5) * pert) / (float)n;
+        part->x[2] =
+            offset[2] +
+            size * (z + 0.5 + random_uniform(-0.5, 0.5) * pert) / (float)n;
+
+        part->v[0] = 0;
+        part->v[1] = 0;
+        part->v[2] = 0;
+        if (h_pert)
+          part->h = size * h * random_uniform(1.f, h_pert) / (float)n;
+        else
+          part->h = size * h / (float)n;
+        h_max = fmaxf(h_max, part->h);
+        part->id = ++(*partId);
+
+        part->time_bin = 1;
+
+#ifdef SWIFT_DEBUG_CHECKS
+        part->ti_drift = 8;
+        part->ti_kick = 8;
+#endif
+        ++part;
+      }
+    }
+  }
+
+  /* Construct the sparts */
+  if (posix_memalign((void **)&cell->stars.parts, spart_align,
+                     scount * sizeof(struct spart)) != 0) {
+    error("couldn't allocate particles, no. of particles: %d", (int)scount);
+  }
+  bzero(cell->stars.parts, scount * sizeof(struct spart));
+
+  struct spart *spart = cell->stars.parts;
+  for (size_t x = 0; x < n_stars; ++x) {
+    for (size_t y = 0; y < n_stars; ++y) {
+      for (size_t z = 0; z < n_stars; ++z) {
+        spart->x[0] =
+            offset[0] + size * (x + 0.5 + random_uniform(-0.5, 0.5) * pert) /
+                            (float)n_stars;
+        spart->x[1] =
+            offset[1] + size * (y + 0.5 + random_uniform(-0.5, 0.5) * pert) /
+                            (float)n_stars;
+        spart->x[2] =
+            offset[2] + size * (z + 0.5 + random_uniform(-0.5, 0.5) * pert) /
+                            (float)n_stars;
+
+        spart->v[0] = 0;
+        spart->v[1] = 0;
+        spart->v[2] = 0;
+        if (h_pert)
+          spart->h = size * h * random_uniform(1.f, h_pert) / (float)n_stars;
+        else
+          spart->h = size * h / (float)n_stars;
+        h_max = fmaxf(h_max, spart->h);
+        spart->id = ++(*spartId);
+
+        spart->time_bin = 1;
+
+#ifdef SWIFT_DEBUG_CHECKS
+        spart->ti_drift = 8;
+        spart->ti_kick = 8;
+#endif
+        ++spart;
+      }
+    }
+  }
+
+  /* Cell properties */
+  cell->split = 0;
+  cell->hydro.h_max = h_max;
+  cell->hydro.count = count;
+  cell->stars.count = scount;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
+  cell->stars.dx_max_sort = 0.;
+  cell->width[0] = size;
+  cell->width[1] = size;
+  cell->width[2] = size;
+  cell->loc[0] = offset[0];
+  cell->loc[1] = offset[1];
+  cell->loc[2] = offset[2];
+
+  cell->stars.ti_old_part = 8;
+  cell->stars.ti_end_min = 8;
+  cell->stars.ti_end_max = 8;
+  cell->hydro.ti_old_part = 8;
+  cell->hydro.ti_end_min = 8;
+  cell->hydro.ti_end_max = 8;
+  cell->grav.ti_old_part = 8;
+  cell->grav.ti_end_min = 8;
+  cell->grav.ti_end_max = 8;
+  cell->nodeID = NODE_ID;
+
+  shuffle_particles(cell->hydro.parts, cell->hydro.count);
+  shuffle_sparticles(cell->stars.parts, cell->stars.count);
+
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
+
+  cell->stars.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->stars.sort[k] = NULL;
+
+  return cell;
+}
+
+void clean_up(struct cell *ci) {
+  free(ci->hydro.parts);
+  free(ci->stars.parts);
+  for (int k = 0; k < 13; k++)
+    if (ci->hydro.sort[k] != NULL) free(ci->hydro.sort[k]);
+  free(ci);
+}
+
+/**
+ * @brief Initializes all particles field to be ready for a density calculation
+ */
+void zero_particle_fields(struct cell *c) {
+  for (int pid = 0; pid < c->stars.count; pid++) {
+    stars_init_spart(&c->stars.parts[pid]);
+  }
+}
+
+/**
+ * @brief Ends the loop by adding the appropriate coefficients
+ */
+void end_calculation(struct cell *c, const struct cosmology *cosmo) {
+  for (int pid = 0; pid < c->stars.count; pid++) {
+    stars_end_density(&c->stars.parts[pid], cosmo);
+
+    /* Recover the common "Neighbour number" definition */
+    c->stars.parts[pid].density.wcount *= pow_dimension(c->stars.parts[pid].h);
+    c->stars.parts[pid].density.wcount *= kernel_norm;
+  }
+}
+
+/**
+ * @brief Dump all the particles to a file
+ */
+void dump_particle_fields(char *fileName, struct cell *main_cell,
+                          struct cell **cells) {
+  FILE *file = fopen(fileName, "w");
+
+  /* Write header */
+  fprintf(file, "# %4s %10s %10s %10s %13s %13s\n", "ID", "pos_x", "pos_y",
+          "pos_z", "wcount", "wcount_dh");
+
+  fprintf(file, "# Main cell --------------------------------------------\n");
+
+  /* Write main cell */
+  for (int pid = 0; pid < main_cell->stars.count; pid++) {
+    fprintf(file, "%6llu %10f %10f %10f %13e %13e\n",
+            main_cell->stars.parts[pid].id, main_cell->stars.parts[pid].x[0],
+            main_cell->stars.parts[pid].x[1], main_cell->stars.parts[pid].x[2],
+            main_cell->stars.parts[pid].density.wcount,
+            main_cell->stars.parts[pid].density.wcount_dh);
+  }
+
+  /* Write all other cells */
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 3; ++k) {
+        struct cell *cj = cells[i * 9 + j * 3 + k];
+        if (cj == main_cell) continue;
+
+        fprintf(file,
+                "# Offset: [%2d %2d %2d] -----------------------------------\n",
+                i - 1, j - 1, k - 1);
+
+        for (int pjd = 0; pjd < cj->stars.count; pjd++) {
+          fprintf(file, "%6llu %10f %10f %10f %13e %13e\n",
+                  cj->stars.parts[pjd].id, cj->stars.parts[pjd].x[0],
+                  cj->stars.parts[pjd].x[1], cj->stars.parts[pjd].x[2],
+                  cj->stars.parts[pjd].density.wcount,
+                  cj->stars.parts[pjd].density.wcount_dh);
+        }
+      }
+    }
+  }
+  fclose(file);
+}
+
+/* Just a forward declaration... */
+void runner_dopair_branch_stars_density(struct runner *r, struct cell *ci,
+                                        struct cell *cj);
+void runner_doself_branch_stars_density(struct runner *r, struct cell *c);
+void runner_dopair_subset_branch_stars_density(struct runner *r,
+                                               struct cell *restrict ci,
+                                               struct spart *restrict sparts_i,
+                                               int *restrict ind, int scount,
+                                               struct cell *restrict cj);
+void runner_doself_subset_branch_stars_density(struct runner *r,
+                                               struct cell *restrict ci,
+                                               struct spart *restrict sparts,
+                                               int *restrict ind, int scount);
+
+/* And go... */
+int main(int argc, char *argv[]) {
+
+#ifdef HAVE_SETAFFINITY
+  engine_pin();
+#endif
+
+  size_t runs = 0, particles = 0;
+  size_t sparticles = 0;
+  double h = 1.23485, size = 1.;
+  double perturbation = 0., h_pert = 0.;
+  char outputFileNameExtension[100] = "";
+  char outputFileName[200] = "";
+
+  /* Initialize CPU frequency, this also starts time. */
+  unsigned long long cpufreq = 0;
+  clocks_set_cpufreq(cpufreq);
+
+/* Choke on FP-exceptions */
+#ifdef HAVE_FE_ENABLE_EXCEPT
+  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+#endif
+
+  /* Get some randomness going */
+  srand(0);
+
+  char c;
+  while ((c = getopt(argc, argv, "s:h:p:n:N:r:t:d:f:")) != -1) {
+    switch (c) {
+      case 'h':
+        sscanf(optarg, "%lf", &h);
+        break;
+      case 'p':
+        sscanf(optarg, "%lf", &h_pert);
+        break;
+      case 's':
+        sscanf(optarg, "%lf", &size);
+        break;
+      case 'n':
+        sscanf(optarg, "%zu", &particles);
+        break;
+      case 'N':
+        sscanf(optarg, "%zu", &sparticles);
+        break;
+      case 'r':
+        sscanf(optarg, "%zu", &runs);
+        break;
+      case 'd':
+        sscanf(optarg, "%lf", &perturbation);
+        break;
+      case 'f':
+        strcpy(outputFileNameExtension, optarg);
+        break;
+      case '?':
+        error("Unknown option.");
+        break;
+    }
+  }
+
+  if (h < 0 || particles == 0 || runs == 0 || sparticles == 0) {
+    printf(
+        "\nUsage: %s -n PARTICLES_PER_AXIS -N SPARTICLES_PER_AXIS -r "
+        "NUMBER_OF_RUNS [OPTIONS...]\n"
+        "\nGenerates 27 cells, filled with particles on a Cartesian grid."
+        "\nThese are then interacted using runner_dopair_stars_density() and "
+        "runner_doself_stars_density()."
+        "\n\nOptions:"
+        "\n-h DISTANCE=1.2348 - Smoothing length in units of <x>"
+        "\n-p                 - Random fractional change in h, h=h*random(1,p)"
+        "\n-s size            - Physical size of the cell"
+        "\n-d pert            - Perturbation to apply to the particles [0,1["
+        "\n-f fileName        - Part of the file name used to save the dumps\n",
+        argv[0]);
+    exit(1);
+  }
+
+  /* Help users... */
+  message("DOSELF1 function called: %s", DOSELF1_NAME);
+  message("DOPAIR1 function called: %s", DOPAIR1_NAME);
+  message("Smoothing length: h = %f", h * size);
+  message("Kernel:               %s", kernel_name);
+  message("Neighbour target: N = %f", pow_dimension(h) * kernel_norm);
+
+  printf("\n");
+
+  /* Build the infrastructure */
+  struct space space;
+  space.periodic = 1;
+  space.dim[0] = 3.;
+  space.dim[1] = 3.;
+  space.dim[2] = 3.;
+
+  struct hydro_props hp;
+  hp.eta_neighbours = h;
+  hp.h_tolerance = 1e0;
+  hp.h_max = FLT_MAX;
+  hp.max_smoothing_iterations = 1;
+  hp.CFL_condition = 0.1;
+
+  struct stars_props stars_p;
+  stars_p.eta_neighbours = h;
+  stars_p.h_tolerance = 1e0;
+  stars_p.max_smoothing_iterations = 1;
+
+  struct engine engine;
+  engine.s = &space;
+  engine.time = 0.1f;
+  engine.ti_current = 8;
+  engine.max_active_bin = num_time_bins;
+  engine.hydro_properties = &hp;
+  engine.stars_properties = &stars_p;
+  engine.nodeID = NODE_ID;
+
+  struct cosmology cosmo;
+  cosmology_init_no_cosmo(&cosmo);
+  engine.cosmology = &cosmo;
+
+  struct runner runner;
+  runner.e = &engine;
+
+  /* Construct some cells */
+  struct cell *cells[27];
+  struct cell *main_cell;
+  static long long partId = 0;
+  long long spartId = particles * particles * particles * 27;
+
+  for (int i = 0; i < 3; ++i) {
+    for (int j = 0; j < 3; ++j) {
+      for (int k = 0; k < 3; ++k) {
+        double offset[3] = {i * size, j * size, k * size};
+        cells[i * 9 + j * 3 + k] =
+            make_cell(particles, sparticles, offset, size, h, &partId, &spartId,
+                      perturbation, h_pert);
+
+        runner_do_drift_part(&runner, cells[i * 9 + j * 3 + k], 0);
+        runner_do_drift_spart(&runner, cells[i * 9 + j * 3 + k], 0);
+
+        runner_do_hydro_sort(&runner, cells[i * 9 + j * 3 + k], 0x1FFF, 0, 0);
+        runner_do_stars_sort(&runner, cells[i * 9 + j * 3 + k], 0x1FFF, 0, 0);
+      }
+    }
+  }
+
+  /* Store the main cell for future use */
+  main_cell = cells[13];
+
+  ticks timings[27];
+  for (int i = 0; i < 27; i++) timings[i] = 0;
+
+  ticks time = 0;
+  for (size_t i = 0; i < runs; ++i) {
+    /* Zero the fields */
+    for (int j = 0; j < 27; ++j) zero_particle_fields(cells[j]);
+
+    const ticks tic = getticks();
+
+#if defined(TEST_DOSELF_SUBSET) || defined(TEST_DOPAIR_SUBSET)
+    int *pid = NULL;
+    int scount = 0;
+    if ((pid = (int *)malloc(sizeof(int) * main_cell->stars.count)) == NULL)
+      error("Can't allocate memory for pid.");
+    for (int k = 0; k < main_cell->stars.count; k++)
+      if (spart_is_active(&main_cell->stars.parts[k], &engine)) {
+        pid[scount] = k;
+        ++scount;
+      }
+#endif
+
+    /* Run all the pairs */
+    for (int j = 0; j < 27; ++j) {
+      if (cells[j] != main_cell) {
+        const ticks sub_tic = getticks();
+
+#ifdef TEST_DOPAIR_SUBSET
+        DOPAIR1_SUBSET(&runner, main_cell, main_cell->stars.parts, pid, scount,
+                       cells[j]);
+#else
+        DOPAIR1(&runner, main_cell, cells[j]);
+#endif
+
+        timings[j] += getticks() - sub_tic;
+      }
+    }
+
+    /* And now the self-interaction */
+    const ticks self_tic = getticks();
+
+#ifdef TEST_DOSELF_SUBSET
+    DOSELF1_SUBSET(&runner, main_cell, main_cell->stars.parts, pid, scount);
+#else
+    DOSELF1(&runner, main_cell);
+#endif
+
+    timings[13] += getticks() - self_tic;
+
+    const ticks toc = getticks();
+    time += toc - tic;
+
+    /* Let's get physical ! */
+    end_calculation(main_cell, &cosmo);
+
+    /* Dump if necessary */
+    if (i % 50 == 0) {
+      sprintf(outputFileName, "swift_star_dopair_27_%.150s.dat",
+              outputFileNameExtension);
+      dump_particle_fields(outputFileName, main_cell, cells);
+    }
+  }
+
+  /* Output timing */
+  ticks corner_time = timings[0] + timings[2] + timings[6] + timings[8] +
+                      timings[18] + timings[20] + timings[24] + timings[26];
+
+  ticks edge_time = timings[1] + timings[3] + timings[5] + timings[7] +
+                    timings[9] + timings[11] + timings[15] + timings[17] +
+                    timings[19] + timings[21] + timings[23] + timings[25];
+
+  ticks face_time = timings[4] + timings[10] + timings[12] + timings[14] +
+                    timings[16] + timings[22];
+
+  message("Corner calculations took       : %15lli ticks.", corner_time / runs);
+  message("Edge calculations took         : %15lli ticks.", edge_time / runs);
+  message("Face calculations took         : %15lli ticks.", face_time / runs);
+  message("Self calculations took         : %15lli ticks.", timings[13] / runs);
+  message("SWIFT calculation took         : %15lli ticks.", time / runs);
+
+  /* Now perform a brute-force version for accuracy tests */
+
+  /* Zero the fields */
+  for (int i = 0; i < 27; ++i) zero_particle_fields(cells[i]);
+
+  const ticks tic = getticks();
+
+  /* Run all the brute-force pairs */
+  for (int j = 0; j < 27; ++j)
+    if (cells[j] != main_cell)
+      pairs_all_stars_density(&runner, main_cell, cells[j]);
+
+  /* And now the self-interaction */
+  self_all_stars_density(&runner, main_cell);
+
+  const ticks toc = getticks();
+
+  /* Let's get physical ! */
+  end_calculation(main_cell, &cosmo);
+
+  /* Dump */
+  sprintf(outputFileName, "star_brute_force_27_%.150s.dat",
+          outputFileNameExtension);
+  dump_particle_fields(outputFileName, main_cell, cells);
+
+  /* Output timing */
+  message("Brute force calculation took : %15lli ticks.", toc - tic);
+
+  /* Clean things to make the sanitizer happy ... */
+  for (int i = 0; i < 27; ++i) clean_up(cells[i]);
+
+  return 0;
+}
diff --git a/tests/test27cellsStars.sh.in b/tests/test27cellsStars.sh.in
new file mode 100644
index 0000000000000000000000000000000000000000..5385b86fca6bcd24878f51567266eb81b7c21772
--- /dev/null
+++ b/tests/test27cellsStars.sh.in
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# List each test that should be run
+declare -a TEST_LIST=(test27cellsStars test27cellsStars_subset)
+
+# Run same test for each executable
+for TEST in "${TEST_LIST[@]}"
+do
+  # Test for particles with the same smoothing length
+    echo ""
+    
+    rm -f star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat
+
+    echo "Running ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard"
+    ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard
+
+    if [ -e star_brute_force_27_standard.dat ]
+    then
+      if python @srcdir@/difffloat.py star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat @srcdir@/star_tolerance_27_normal.dat 6
+      then
+        echo "Accuracy test passed"
+      else
+        echo "Accuracy test failed"
+        exit 1
+      fi
+    else
+      echo "Error Missing test output file"
+      exit 1
+    fi
+
+    echo "------------"
+
+
+  # Test for particles with random smoothing lengths
+    echo ""
+
+    rm -f star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat
+
+    echo "Running ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard -p 1.1"
+    ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard -p 1.1
+
+    if [ -e star_brute_force_27_standard.dat ]
+    then
+      if python @srcdir@/difffloat.py star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat @srcdir@/star_tolerance_27_perturbed_h.dat 6
+      then
+        echo "Accuracy test passed"
+      else
+        echo "Accuracy test failed"
+        exit 1
+      fi
+    else
+      echo "Error Missing test output file"
+      exit 1
+    fi
+
+    echo "------------"
+
+
+  # Test for particles with random smoothing lengths
+    echo ""
+
+    rm -f star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat
+
+    echo "Running ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard -p 1.3"
+    ./$TEST -n 6 -N 7 -r 1 -d 0 -f standard -p 1.3
+
+    if [ -e star_brute_force_27_standard.dat ]
+    then
+      if python @srcdir@/difffloat.py star_brute_force_27_standard.dat swift_star_dopair_27_standard.dat @srcdir@/star_tolerance_27_perturbed_h2.dat 6
+      then
+        echo "Accuracy test passed"
+      else
+        echo "Accuracy test failed"
+        exit 1
+      fi
+    else
+      echo "Error Missing test output file"
+      exit 1
+    fi
+
+    echo "------------"
+
+done
+
+exit $?
diff --git a/tests/test27cellsStarsPerturbed.sh.in b/tests/test27cellsStarsPerturbed.sh.in
new file mode 100644
index 0000000000000000000000000000000000000000..ddf258fc17e6054d801ea9c73b4d0bd274cfad12
--- /dev/null
+++ b/tests/test27cellsStarsPerturbed.sh.in
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+# Test for particles with the same smoothing length
+    echo ""
+
+    rm -f star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat
+
+    echo "Running ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed"
+    ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed
+
+    if [ -e star_brute_force_27_perturbed.dat ]
+    then
+	if python @srcdir@/difffloat.py star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat @srcdir@/star_tolerance_27_perturbed.dat 6
+	then
+	    echo "Accuracy test passed"
+	else
+	    echo "Accuracy test failed"
+	    exit 1
+	fi
+    else
+	echo "Error Missing test output file"
+	exit 1
+    fi
+
+    echo "------------"
+
+# Test for particles with random smoothing lengths
+    echo ""
+
+    rm -f star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat
+
+    echo "Running ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed -p 1.1"
+    ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed -p 1.1
+
+    if [ -e star_brute_force_27_perturbed.dat ]
+    then
+	if python @srcdir@/difffloat.py star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat @srcdir@/star_tolerance_27_perturbed_h.dat 6
+	then
+	    echo "Accuracy test passed"
+	else
+	    echo "Accuracy test failed"
+	    exit 1
+	fi
+    else
+	echo "Error Missing test output file"
+	exit 1
+    fi
+
+    echo "------------"
+
+
+# Test for particles with random smoothing lengths
+    echo ""
+
+    rm -f star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat
+
+    echo "Running ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed -p 1.3"
+    ./test27cellsStars -n 6 -N 7 -r 1 -d 0.1 -f perturbed -p 1.3
+
+    if [ -e star_brute_force_27_perturbed.dat ]
+    then
+	if python @srcdir@/difffloat.py star_brute_force_27_perturbed.dat swift_star_dopair_27_perturbed.dat @srcdir@/star_tolerance_27_perturbed_h2.dat 6
+	then
+	    echo "Accuracy test passed"
+	else
+	    echo "Accuracy test failed"
+	    exit 1
+	fi
+    else
+	echo "Error Missing test output file"
+	exit 1
+    fi
+
+    echo "------------"
+
+exit $?
diff --git a/tests/testActivePair.c b/tests/testActivePair.c
index 6889a18887894af0a9434f786df21dbf842e87e5..54a3189b89d9de757bf340bf759db5b40f947174 100644
--- a/tests/testActivePair.c
+++ b/tests/testActivePair.c
@@ -33,7 +33,8 @@
 
 /* Typdef function pointer for interaction function. */
 typedef void (*interaction_func)(struct runner *, struct cell *, struct cell *);
-typedef void (*init_func)(struct cell *, const struct cosmology *);
+typedef void (*init_func)(struct cell *, const struct cosmology *,
+                          const struct hydro_props *);
 typedef void (*finalise_func)(struct cell *, const struct cosmology *);
 
 /**
@@ -62,14 +63,14 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   struct cell *cell = (struct cell *)malloc(sizeof(struct cell));
   bzero(cell, sizeof(struct cell));
 
-  if (posix_memalign((void **)&cell->parts, part_align,
+  if (posix_memalign((void **)&cell->hydro.parts, part_align,
                      count * sizeof(struct part)) != 0) {
     error("couldn't allocate particles, no. of particles: %d", (int)count);
   }
-  bzero(cell->parts, count * sizeof(struct part));
+  bzero(cell->hydro.parts, count * sizeof(struct part));
 
   /* Construct the parts */
-  struct part *part = cell->parts;
+  struct part *part = cell->hydro.parts;
   for (size_t x = 0; x < n; ++x) {
     for (size_t y = 0; y < n; ++y) {
       for (size_t z = 0; z < n; ++z) {
@@ -110,7 +111,8 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
 /* Set the thermodynamic variable */
 #if defined(GADGET2_SPH)
         part->entropy = 1.f;
-#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH)
+#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH) || \
+    defined(HOPKINS_PU_SPH_MONAGHAN) || defined(ANARCHY_PU_SPH)
         part->u = 1.f;
 #elif defined(HOPKINS_PE_SPH)
         part->entropy = 1.f;
@@ -135,10 +137,10 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
 
   /* Cell properties */
   cell->split = 0;
-  cell->h_max = h_max;
-  cell->count = count;
-  cell->dx_max_part = 0.;
-  cell->dx_max_sort = 0.;
+  cell->hydro.h_max = h_max;
+  cell->hydro.count = count;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
   cell->width[0] = size;
   cell->width[1] = size;
   cell->width[2] = size;
@@ -146,43 +148,44 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   cell->loc[1] = offset[1];
   cell->loc[2] = offset[2];
 
-  cell->ti_old_part = 8;
-  cell->ti_hydro_end_min = 8;
-  cell->ti_hydro_end_max = 10;
+  cell->hydro.ti_old_part = 8;
+  cell->hydro.ti_end_min = 8;
+  cell->hydro.ti_end_max = 10;
   cell->nodeID = NODE_ID;
 
-  shuffle_particles(cell->parts, cell->count);
+  shuffle_particles(cell->hydro.parts, cell->hydro.count);
 
-  cell->sorted = 0;
-  for (int k = 0; k < 13; k++) cell->sort[k] = NULL;
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
 
   return cell;
 }
 
 void clean_up(struct cell *ci) {
-  free(ci->parts);
+  free(ci->hydro.parts);
   for (int k = 0; k < 13; k++)
-    if (ci->sort[k] != NULL) free(ci->sort[k]);
+    if (ci->hydro.sort[k] != NULL) free(ci->hydro.sort[k]);
   free(ci);
 }
 
 /**
  * @brief Initializes all particles field to be ready for a density calculation
  */
-void zero_particle_fields_density(struct cell *c,
-                                  const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_init_part(&c->parts[pid], NULL);
+void zero_particle_fields_density(struct cell *c, const struct cosmology *cosmo,
+                                  const struct hydro_props *hydro_props) {
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_init_part(&c->hydro.parts[pid], NULL);
   }
 }
 
 /**
  * @brief Initializes all particles field to be ready for a force calculation
  */
-void zero_particle_fields_force(struct cell *c, const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    struct part *p = &c->parts[pid];
-    struct xpart *xp = &c->xparts[pid];
+void zero_particle_fields_force(struct cell *c, const struct cosmology *cosmo,
+                                const struct hydro_props *hydro_props) {
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    struct part *p = &c->hydro.parts[pid];
+    struct xpart *xp = &c->hydro.xparts[pid];
 
 /* Mimic the result of a density calculation */
 #ifdef GADGET2_SPH
@@ -209,7 +212,8 @@ void zero_particle_fields_force(struct cell *c, const struct cosmology *cosmo) {
     p->density.wcount = 48.f / (kernel_norm * pow_dimension(p->h));
     p->density.wcount_dh = 0.f;
 #endif /* PRESSURE-ENTROPY */
-#ifdef HOPKINS_PU_SPH
+#if defined(HOPKINS_PU_SPH) || defined(HOPKINS_PU_SPH_MONAGHAN) || \
+    defined(ANARCHY_PU_SPH)
     p->rho = 1.f;
     p->pressure_bar = 0.6666666;
     p->density.rho_dh = 0.f;
@@ -217,9 +221,16 @@ void zero_particle_fields_force(struct cell *c, const struct cosmology *cosmo) {
     p->density.wcount = 48.f / (kernel_norm * pow_dimension(p->h));
     p->density.wcount_dh = 0.f;
 #endif /* PRESSURE-ENERGY */
+#if defined(ANARCHY_PU_SPH)
+    /* Initialise viscosity variables */
+    p->viscosity.alpha = 0.8;
+    p->viscosity.div_v = 0.f;
+    p->viscosity.div_v_previous_step = 0.f;
+    p->viscosity.v_sig = hydro_get_comoving_soundspeed(p);
+#endif /* ANARCHY_PU_SPH viscosity variables */
 
     /* And prepare for a round of force tasks. */
-    hydro_prepare_force(p, xp, cosmo);
+    hydro_prepare_force(p, xp, cosmo, hydro_props, 0.);
     hydro_reset_acceleration(p);
   }
 }
@@ -228,12 +239,12 @@ void zero_particle_fields_force(struct cell *c, const struct cosmology *cosmo) {
  * @brief Ends the density loop by adding the appropriate coefficients
  */
 void end_calculation_density(struct cell *c, const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_end_density(&c->parts[pid], cosmo);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_end_density(&c->hydro.parts[pid], cosmo);
 
     /* Recover the common "Neighbour number" definition */
-    c->parts[pid].density.wcount *= pow_dimension(c->parts[pid].h);
-    c->parts[pid].density.wcount *= kernel_norm;
+    c->hydro.parts[pid].density.wcount *= pow_dimension(c->hydro.parts[pid].h);
+    c->hydro.parts[pid].density.wcount *= kernel_norm;
   }
 }
 
@@ -241,8 +252,8 @@ void end_calculation_density(struct cell *c, const struct cosmology *cosmo) {
  * @brief Ends the force loop by adding the appropriate coefficients
  */
 void end_calculation_force(struct cell *c, const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_end_force(&c->parts[pid], cosmo);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_end_force(&c->hydro.parts[pid], cosmo);
   }
 }
 
@@ -257,16 +268,18 @@ void dump_particle_fields(char *fileName, struct cell *ci, struct cell *cj) {
 
   fprintf(file, "# ci --------------------------------------------\n");
 
-  for (int pid = 0; pid < ci->count; pid++) {
-    fprintf(file, "%6llu %13e %13e\n", ci->parts[pid].id,
-            ci->parts[pid].density.wcount, ci->parts[pid].force.h_dt);
+  for (int pid = 0; pid < ci->hydro.count; pid++) {
+    fprintf(file, "%6llu %13e %13e\n", ci->hydro.parts[pid].id,
+            ci->hydro.parts[pid].density.wcount,
+            ci->hydro.parts[pid].force.h_dt);
   }
 
   fprintf(file, "# cj --------------------------------------------\n");
 
-  for (int pjd = 0; pjd < cj->count; pjd++) {
-    fprintf(file, "%6llu %13e %13e\n", cj->parts[pjd].id,
-            cj->parts[pjd].density.wcount, cj->parts[pjd].force.h_dt);
+  for (int pjd = 0; pjd < cj->hydro.count; pjd++) {
+    fprintf(file, "%6llu %13e %13e\n", cj->hydro.parts[pjd].id,
+            cj->hydro.parts[pjd].density.wcount,
+            cj->hydro.parts[pjd].force.h_dt);
   }
 
   fclose(file);
@@ -293,12 +306,12 @@ void test_pair_interactions(struct runner *runner, struct cell **ci,
                             interaction_func vec_interaction, init_func init,
                             finalise_func finalise) {
 
-  runner_do_sort(runner, *ci, 0x1FFF, 0, 0);
-  runner_do_sort(runner, *cj, 0x1FFF, 0, 0);
+  runner_do_hydro_sort(runner, *ci, 0x1FFF, 0, 0);
+  runner_do_hydro_sort(runner, *cj, 0x1FFF, 0, 0);
 
   /* Zero the fields */
-  init(*ci, runner->e->cosmology);
-  init(*cj, runner->e->cosmology);
+  init(*ci, runner->e->cosmology, runner->e->hydro_properties);
+  init(*cj, runner->e->cosmology, runner->e->hydro_properties);
 
   /* Run the test */
   vec_interaction(runner, *ci, *cj);
@@ -313,8 +326,8 @@ void test_pair_interactions(struct runner *runner, struct cell **ci,
   /* Now perform a brute-force version for accuracy tests */
 
   /* Zero the fields */
-  init(*ci, runner->e->cosmology);
-  init(*cj, runner->e->cosmology);
+  init(*ci, runner->e->cosmology, runner->e->hydro_properties);
+  init(*cj, runner->e->cosmology, runner->e->hydro_properties);
 
   /* Run the brute-force test */
   serial_interaction(runner, *ci, *cj);
@@ -485,6 +498,7 @@ int main(int argc, char *argv[]) {
   struct space space;
   struct engine engine;
   struct cosmology cosmo;
+  struct hydro_props hydro_props;
   struct runner *runner;
   char c;
   static long long partId = 0;
@@ -569,6 +583,8 @@ int main(int argc, char *argv[]) {
 
   cosmology_init_no_cosmo(&cosmo);
   engine.cosmology = &cosmo;
+  hydro_props_init_no_hydro(&hydro_props);
+  engine.hydro_properties = &hydro_props;
 
   if (posix_memalign((void **)&runner, SWIFT_STRUCT_ALIGNMENT,
                      sizeof(struct runner)) != 0) {
diff --git a/tests/testAdiabaticIndex.c b/tests/testAdiabaticIndex.c
index 60ecefa264f48bed2d4df205766dc392a1a03d0f..6aa794207f0e23e6a26060f3ef98b7ee841d7a32 100644
--- a/tests/testAdiabaticIndex.c
+++ b/tests/testAdiabaticIndex.c
@@ -34,7 +34,8 @@
  */
 void check_value(float a, float b, const char* s) {
   if (fabsf(a - b) / fabsf(a + b) > 1.e-6f)
-    error("Values are inconsistent: %12.15e %12.15e (%s)!", a, b, s);
+    error("Values are inconsistent: %12.15e %12.15e rel=%e (%s)!", a, b,
+          fabsf(a - b) / fabsf(a + b), s);
 }
 
 /**
@@ -77,36 +78,61 @@ void check_constants(void) {
 void check_functions(float x) {
 
   float val_a, val_b;
+  const double xx = x;
+
+#if defined(HYDRO_GAMMA_5_3)
+#define hydro_gamma_d (5. / 3.)
+#elif defined(HYDRO_GAMMA_7_5)
+#define hydro_gamma_d (7. / 5.)
+#elif defined(HYDRO_GAMMA_4_3)
+#define hydro_gamma_d (4. / 3.)
+#elif defined(HYDRO_GAMMA_2_1)
+#define hydro_gamma_d (2. / 1.)
+#else
+#error "Need to choose an adiabatic index!"
+#endif
+
+  val_a = pow(xx, hydro_gamma_d);
+  val_b = pow_gamma(x);
+  check_value(val_a, val_b, "x^gamma");
+
+  val_a = pow(xx, hydro_gamma_d - 1.0);
+  val_b = pow_gamma_minus_one(x);
+  check_value(val_a, val_b, "x^(gamma - 1)");
+
+  val_a = pow(xx, -(hydro_gamma_d - 1.0));
+  val_b = pow_minus_gamma_minus_one(x);
+  check_value(val_a, val_b, "x^(-(gamma - 1))");
 
-  val_a = powf(x, -hydro_gamma);
+  val_a = pow(xx, -hydro_gamma_d);
   val_b = pow_minus_gamma(x);
   check_value(val_a, val_b, "x^(-gamma)");
 
-  val_a = powf(x, 2.0f / (hydro_gamma - 1.0f));
+  val_a = pow(xx, 2.0 / (hydro_gamma_d - 1.0));
   val_b = pow_two_over_gamma_minus_one(x);
   check_value(val_a, val_b, "x^(2/(gamma-1))");
 
-  val_a = powf(x, 2.0f * hydro_gamma / (hydro_gamma - 1.0f));
+  val_a = pow(xx, 2.0 * hydro_gamma_d / (hydro_gamma_d - 1.0));
   val_b = pow_two_gamma_over_gamma_minus_one(x);
   check_value(val_a, val_b, "x^((2 gamma)/(gamma-1))");
 
-  val_a = powf(x, 0.5f * (hydro_gamma - 1.0f) / hydro_gamma);
+  val_a = pow(xx, (hydro_gamma_d - 1.0) / (2.0 * hydro_gamma_d));
   val_b = pow_gamma_minus_one_over_two_gamma(x);
   check_value(val_a, val_b, "x^((gamma-1)/(2 gamma))");
 
-  val_a = powf(x, -0.5f * (hydro_gamma + 1.0f) / hydro_gamma);
+  val_a = pow(xx, -(hydro_gamma_d + 1.0) / (2.0 * hydro_gamma_d));
   val_b = pow_minus_gamma_plus_one_over_two_gamma(x);
   check_value(val_a, val_b, "x^(-(gamma+1)/(2 gamma))");
 
-  val_a = powf(x, 1.0f / hydro_gamma);
+  val_a = pow(xx, 1.0 / hydro_gamma_d);
   val_b = pow_one_over_gamma(x);
   check_value(val_a, val_b, "x^(1/gamma)");
 
-  val_a = powf(x, 3.f * hydro_gamma - 2.f);
+  val_a = pow(xx, 3. * hydro_gamma_d - 2.);
   val_b = pow_three_gamma_minus_two(x);
   check_value(val_a, val_b, "x^(3gamma - 2)");
 
-  val_a = powf(x, (3.f * hydro_gamma - 5.f) / 2.f);
+  val_a = pow(xx, (3. * hydro_gamma_d - 5.) / 2.);
   val_b = pow_three_gamma_minus_five_over_two(x);
   check_value(val_a, val_b, "x^((3gamma - 5)/2)");
 }
diff --git a/tests/testCbrt.c b/tests/testCbrt.c
index b608f9a00d619570c298f4123038f930584a245c..bba379902b2bbc16bd49a5bbba0917100b4d60a7 100644
--- a/tests/testCbrt.c
+++ b/tests/testCbrt.c
@@ -38,7 +38,9 @@ int main(int argc, char *argv[]) {
   clocks_set_cpufreq(cpufreq);
 
   /* Choke on FP-exceptions */
+#ifdef HAVE_FE_ENABLE_EXCEPT
   feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+#endif
 
   /* Some constants for this test. */
   const int num_vals = 200000000;
@@ -59,8 +61,8 @@ int main(int argc, char *argv[]) {
   for (int k = 0; k < num_vals; k++) {
     const double exact = cbrt(data[k]);  // computed in double just to be sure.
     const float ours = 1.0f / icbrtf(data[k]);
-    const float err_abs = fabsf(exact - ours);
-    const float err_rel = 0.5f * fabsf(exact - ours) / (exact + ours);
+    const float err_abs = fabs(exact - ours);
+    const float err_rel = 0.5f * fabs(exact - ours) / (exact + ours);
 
     if (err_rel > err_rel_tol && data[k] != 0.f)
       error(
@@ -125,5 +127,6 @@ int main(int argc, char *argv[]) {
   message("x * icbrtf   took %9.3f %s (acc = %18.11e).",
           clocks_from_ticks(getticks() - tic_ours), clocks_getunit(), acc_ours);
 
+  free(data);
   return 0;
 }
diff --git a/tests/testCooling.c b/tests/testCooling.c
new file mode 100644
index 0000000000000000000000000000000000000000..727a9638b09b871e866fe787438a5707fd43ec6b
--- /dev/null
+++ b/tests/testCooling.c
@@ -0,0 +1,204 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2015 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+#include "../config.h"
+
+/* Local headers. */
+#include "swift.h"
+
+#if 0
+
+/**
+ * @brief Assign particle density and entropy corresponding to the
+ * hydrogen number density and internal energy specified.
+ *
+ * @param p Particle data structure
+ * @param xp extra particle structure
+ * @param us unit system struct
+ * @param cooling Cooling function data structure
+ * @param cosmo Cosmology data structure
+ * @param phys_const Physical constants data structure
+ * @param nh_cgs Hydrogen number density (cgs units)
+ * @param u Internal energy (cgs units)
+ * @param ti_current integertime to set cosmo quantities
+ */
+void set_quantities(struct part *restrict p, struct xpart *restrict xp,
+                    const struct unit_system *restrict us,
+                    const struct cooling_function_data *restrict cooling,
+                    struct cosmology *restrict cosmo,
+                    const struct phys_const *restrict phys_const, float nh_cgs,
+                    double u, integertime_t ti_current) {
+
+  /* Update cosmology quantities */
+  cosmology_update(cosmo, phys_const, ti_current);
+
+  /* calculate density */
+  double hydrogen_number_density = nh_cgs / cooling->number_density_scale;
+  p->rho = hydrogen_number_density * phys_const->const_proton_mass /
+           p->chemistry_data.metal_mass_fraction[chemistry_element_H] *
+           (cosmo->a * cosmo->a * cosmo->a);
+
+  /* update entropy based on internal energy */
+  float pressure = (u * cosmo->a * cosmo->a) / cooling->internal_energy_scale *
+                   p->rho * (hydro_gamma_minus_one);
+  p->entropy = pressure * (pow(p->rho, -hydro_gamma));
+  xp->entropy_full = p->entropy;
+}
+
+/**
+ * @brief Produces contributions to cooling rates for different
+ * hydrogen number densities, from different metals,
+ * tests 1d and 4d table interpolations produce
+ * same results for cooling rate, dlambda/du and temperature.
+ */
+int main(int argc, char **argv) {
+  // Declare relevant structs
+  struct swift_params *params = malloc(sizeof(struct swift_params));
+  struct unit_system us;
+  struct chemistry_global_data chem_data;
+  struct part p;
+  struct xpart xp;
+  struct phys_const phys_const;
+  struct cooling_function_data cooling;
+  struct cosmology cosmo;
+  char *parametersFileName = "./testCooling.yml";
+
+  float nh;  // hydrogen number density
+  double u;  // internal energy
+
+  /* Number of values to test for in redshift,
+   * hydrogen number density and internal energy */
+  const int n_z = 50;
+  const int n_nh = 50;
+  const int n_u = 50;
+
+  /* Number of subcycles and tolerance used to compare
+   * subcycled and implicit solution. Note, high value
+   * of tolerance due to mismatch between explicit and
+   * implicit solution for large timesteps */
+  const int n_subcycle = 1000;
+  const float integration_tolerance = 0.2;
+
+  /* Set dt */
+  const float dt_cool = 1.0e-5;
+  const float dt_therm = 1.0e-5;
+
+  /* Read the parameter file */
+  if (params == NULL) error("Error allocating memory for the parameter file.");
+  message("Reading runtime parameters from file '%s'", parametersFileName);
+  parser_read_file(parametersFileName, params);
+
+  /* Init units */
+  units_init_from_params(&us, params, "InternalUnitSystem");
+  phys_const_init(&us, params, &phys_const);
+
+  /* Init chemistry */
+  chemistry_init(params, &us, &phys_const, &chem_data);
+  chemistry_first_init_part(&phys_const, &us, &cosmo, &chem_data, &p, &xp);
+  chemistry_print(&chem_data);
+
+  /* Init cosmology */
+  cosmology_init(params, &us, &phys_const, &cosmo);
+  cosmology_print(&cosmo);
+
+  /* Init cooling */
+  cooling_init(params, &us, &phys_const, &cooling);
+  cooling_print(&cooling);
+  cooling_update(&cosmo, &cooling, 0);
+
+  /* Calculate abundance ratios */
+  float *abundance_ratio;
+  abundance_ratio = malloc((chemistry_element_count + 2) * sizeof(float));
+  abundance_ratio_to_solar(&p, &cooling, abundance_ratio);
+
+  /* extract mass fractions, calculate table indices and offsets */
+  float XH = p.chemistry_data.metal_mass_fraction[chemistry_element_H];
+  float HeFrac =
+      p.chemistry_data.metal_mass_fraction[chemistry_element_He] /
+      (XH + p.chemistry_data.metal_mass_fraction[chemistry_element_He]);
+  int He_i;
+  float d_He;
+  get_index_1d(cooling.HeFrac, cooling.N_He, HeFrac, &He_i, &d_He);
+
+  /* Cooling function needs to know the minimal energy. Set it to the lowest
+   * internal energy in the cooling table. */
+  struct hydro_props hydro_properties;
+  hydro_properties.minimal_internal_energy =
+      exp(M_LN10 * cooling.Therm[0]) / cooling.internal_energy_scale;
+
+  /* calculate spacing in nh and u */
+  const float delta_nh = (cooling.nH[cooling.N_nH - 1] - cooling.nH[0]) / n_nh;
+  const float delta_u =
+      (cooling.Therm[cooling.N_Temp - 1] - cooling.Therm[0]) / n_u;
+
+  for (int z_i = 0; z_i < n_z; z_i++) {
+    integertime_t ti_current = max_nr_timesteps / n_z * z_i;
+    for (int nh_i = 0; nh_i < n_nh; nh_i++) {
+      nh = exp(M_LN10 * cooling.nH[0] + delta_nh * nh_i);
+      for (int u_i = 0; u_i < n_u; u_i++) {
+        u = exp(M_LN10 * cooling.Therm[0] + delta_u * u_i);
+
+        /* update nh, u, z */
+        set_quantities(&p, &xp, &us, &cooling, &cosmo, &phys_const, nh, u,
+                       ti_current);
+
+        /* calculate subcycled solution */
+        for (int t_subcycle = 0; t_subcycle < n_subcycle; t_subcycle++) {
+          p.entropy_dt = 0;
+          cooling_cool_part(&phys_const, &us, &cosmo, &hydro_properties,
+                            &cooling, &p, &xp, dt_cool / n_subcycle,
+                            dt_therm / n_subcycle);
+          xp.entropy_full += p.entropy_dt * dt_therm / n_subcycle;
+        }
+        double u_subcycled =
+            hydro_get_physical_internal_energy(&p, &xp, &cosmo) *
+            cooling.internal_energy_scale;
+
+        /* reset quantities to nh, u, and z that we want to test */
+        set_quantities(&p, &xp, &us, &cooling, &cosmo, &phys_const, nh, u,
+                       ti_current);
+
+        /* compute implicit solution */
+        cooling_cool_part(&phys_const, &us, &cosmo, &hydro_properties, &cooling,
+                          &p, &xp, dt_cool, dt_therm);
+        double u_implicit =
+            hydro_get_physical_internal_energy(&p, &xp, &cosmo) *
+            cooling.internal_energy_scale;
+
+        /* check if the two solutions are consistent */
+        if (fabs((u_implicit - u_subcycled) / u_subcycled) >
+            integration_tolerance)
+          message(
+              "implicit and subcycled solutions do not match. z_i %d nh_i %d "
+              "u_i %d implicit %.5e subcycled %.5e error %.5e",
+              z_i, nh_i, u_i, u_implicit, u_subcycled,
+              fabs((u_implicit - u_subcycled) / u_subcycled));
+      }
+    }
+  }
+  message("done test");
+
+  free(params);
+  return 0;
+}
+
+#else
+
+int main(int argc, char **argv) { return 0; }
+
+#endif
diff --git a/tests/testCooling.yml b/tests/testCooling.yml
new file mode 100644
index 0000000000000000000000000000000000000000..faec32cdfec20b48af7341889c79b60bd2f6bb5b
--- /dev/null
+++ b/tests/testCooling.yml
@@ -0,0 +1,107 @@
+# Define the system of units to use internally. 
+InternalUnitSystem:
+  UnitMass_in_cgs:     1.989e43      # 10^10 M_sun in grams
+  UnitLength_in_cgs:   3.085678e24   # Mpc in centimeters
+  UnitVelocity_in_cgs: 1e5           # km/s in centimeters per second
+  UnitCurrent_in_cgs:  1             # Amperes
+  UnitTemp_in_cgs:     1             # Kelvin
+
+# Cosmological parameters
+Cosmology:
+  h:              0.6777        # Reduced Hubble constant
+  a_begin:        0.1     # Initial scale-factor of the simulation
+  a_end:          1.0           # Final scale factor of the simulation
+  Omega_m:        0.307         # Matter density parameter
+  Omega_lambda:   0.693         # Dark-energy density parameter
+  Omega_b:        0.0455        # Baryon density parameter
+
+# Parameters governing the time integration
+TimeIntegration:
+  time_begin: 0.    # The starting time of the simulation (in internal units).
+  time_end:   1e-2  # The end time of the simulation (in internal units).
+  dt_min:     1e-10 # The minimal time-step size of the simulation (in internal units).
+  dt_max:     1e-7  # The maximal time-step size of the simulation (in internal units).
+  
+Scheduler:
+  max_top_level_cells:    15
+  
+# Parameters governing the snapshots
+Snapshots:
+  basename:            coolingBox # Common part of the name of output files
+  scale_factor_first: 0.142857142857  # Scale-factor of the first snapshot (cosmological run)
+  time_first:          0.01  # Time of the first output (non-cosmological run) (in internal units)
+  delta_time: 1.00002  # Time difference between consecutive outputs (in internal units)
+  compression:         1
+
+# Parameters governing the conserved quantities statistics
+Statistics:
+  scale_factor_first: 0.142857142857 # Scale-factor of the first stat dump (cosmological run)
+  time_first:          0.01 # Time of the first stat dump (non-cosmological run) (in internal units)
+  delta_time: 1.00002 # Time between statistics output
+
+# Parameters for the self-gravity scheme
+Gravity:
+  eta:                    0.025     # Constant dimensionless multiplier for time integration.
+  theta:                  0.85      # Opening angle (Multipole acceptance criterion)
+  comoving_softening:     0.0026994 # Comoving softening length (in internal units).
+  max_physical_softening: 0.0007    # Physical softening length (in internal units).
+  
+# Parameters for the hydrodynamics scheme
+SPH:
+  resolution_eta:        1.2348   # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
+  CFL_condition:         0.1      # Courant-Friedrichs-Lewy condition for time integration.
+  minimal_temperature: 100.       # Kelvin
+  
+# Parameters related to the initial conditions
+InitialConditions:
+  file_name:  ./coolingBox.hdf5     # The file to read
+  periodic:   1
+
+# Parameters for the Lambda cooling model
+LambdaCooling:
+  lambda_nH2_cgs:              1e-22 # Cooling rate divided by square Hydrogen number density (in cgs units [erg * s^-1 * cm^3])
+  cooling_tstep_mult:          1.0        # Dimensionless pre-factor for the time-step condition
+
+# Dimensionless constant cooling (AB 13/02/18)
+ConstCooling:
+  cooling_rate:         10000.0
+  min_energy:           0.0
+  cooling_tstep_mult:   1.0
+
+# Cooling with Grackle 2.0
+GrackleCooling:
+  CloudyTable: CloudyData_UVB=HM2012.h5 # Name of the Cloudy Table (available on the grackle bitbucket repository)
+  WithUVbackground: 0 # Enable or not the UV background
+  Redshift: 0 # Redshift to use (-1 means time based redshift)
+  WithMetalCooling: 1 # Enable or not the metal cooling
+  ProvideVolumetricHeatingRates: 0 # User provide volumetric heating rates
+  ProvideSpecificHeatingRates: 0 # User provide specific heating rates
+  SelfShieldingMethod: 0 # Grackle (<= 3) or Gear self shielding method
+  OutputMode: 1 # Write in output corresponding primordial chemistry mode
+  MaxSteps: 1000
+  ConvergenceLimit: 1e-2
+
+EagleCooling:
+  filename:                /cosma5/data/Eagle/BG_Tables/CoolingTables/
+  reionisation_redshift:   8.989
+  He_reion_z_centre:       3.5
+  He_reion_z_sigma:        0.5
+  He_reion_ev_pH:          2.0
+
+EAGLEChemistry:
+  InitMetallicity:         0.014
+  InitAbundance_Hydrogen:  0.70649785
+  InitAbundance_Helium:    0.28055534
+  InitAbundance_Carbon:    2.0665436e-3
+  InitAbundance_Nitrogen:  8.3562563e-4
+  InitAbundance_Oxygen:    5.4926244e-3
+  InitAbundance_Neon:      1.4144605e-3
+  InitAbundance_Magnesium: 5.907064e-4
+  InitAbundance_Silicon:   6.825874e-4
+  InitAbundance_Iron:      1.1032152e-3
+  CalciumOverSilicon:      0.0941736
+  SulphurOverSilicon:      0.6054160
+
+GearChemistry:
+  InitialMetallicity: 0.01295
+
diff --git a/tests/testCosmology.c b/tests/testCosmology.c
index bafad55471453f7308d1498daa15dbae3a76a6bc..05dc69b2a925b0e19a9bb4a20ac1003b4b30704b 100644
--- a/tests/testCosmology.c
+++ b/tests/testCosmology.c
@@ -18,7 +18,7 @@
  ******************************************************************************/
 
 /* Some standard headers. */
-#include <stdlib.h>
+#include "../config.h"
 
 /* Includes. */
 #include "swift.h"
diff --git a/tests/testDump.c b/tests/testDump.c
index f47a44256536d6ac1d9676c844f7081a6daa5ca4..878daae9cc0deddd6f9fb02857041f705110743c 100644
--- a/tests/testDump.c
+++ b/tests/testDump.c
@@ -73,7 +73,7 @@ int main(int argc, char *argv[]) {
   for (int run = 0; run < num_runs; run++) {
 
     /* Ensure capacity. */
-    dump_ensure(&d, 7 * chunk_size);
+    dump_ensure(&d, 7 * chunk_size, 7 * chunk_size);
 
     /* Dump a few numbers. */
     printf("dumping %i chunks...\n", chunk_size);
diff --git a/tests/testGravityDerivatives.c b/tests/testGravityDerivatives.c
index 184d66db623f34963dc91915c12fc58fbaa4ec4d..f31967de7075bccfb2c7fb19c1ba262aa12da54f 100644
--- a/tests/testGravityDerivatives.c
+++ b/tests/testGravityDerivatives.c
@@ -943,6 +943,13 @@ int main(int argc, char* argv[]) {
 
     message("Testing gravity for r=(%e %e %e)", dx, dy, dz);
 
+    const double r_s = 100. * ((double)rand() / (RAND_MAX));
+    const double r_s_inv = 1. / r_s;
+
+    const int periodic = 0;
+
+    message("Mesh scale r_s=%e periodic=%d", r_s, periodic);
+
     /* Compute distance */
     const double r2 = dx * dx + dy * dy + dz * dz;
     const double r_inv = 1. / sqrt(r2);
@@ -952,8 +959,8 @@ int main(int argc, char* argv[]) {
 
     /* Compute all derivatives */
     struct potential_derivatives_M2L pot;
-    compute_potential_derivatives_M2L(dx, dy, dz, r2, r_inv, eps, eps_inv,
-                                      /*periodic*/ 0, /* 1/r_s */ 0., &pot);
+    potential_derivatives_compute_M2L(dx, dy, dz, r2, r_inv, eps, eps_inv,
+                                      periodic, r_s_inv, &pot);
 
     /* Minimal value we care about */
     const double min = 1e-9;
diff --git a/tests/testInteractions.c b/tests/testInteractions.c
index 306f14a35ca047430f67e33e9fd63848e9207b68..e14fddd640764c7e22a217fb483791494ba4fae0 100644
--- a/tests/testInteractions.c
+++ b/tests/testInteractions.c
@@ -16,12 +16,16 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
 
+/* Some standard headers. */
 #include <fenv.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+
+/* Local includes */
 #include "swift.h"
 
 /* Other schemes need to be added here if they are not vectorized, otherwise
@@ -107,9 +111,10 @@ struct part *make_particles(size_t count, double *offset, double spacing,
  */
 void prepare_force(struct part *parts, size_t count) {
 
-#if !defined(GIZMO_MFV_SPH) && !defined(SHADOWFAX_SPH) && \
-    !defined(MINIMAL_SPH) && !defined(PLANETARY_SPH) &&   \
-    !defined(HOPKINS_PU_SPH)
+#if !defined(GIZMO_MFV_SPH) && !defined(SHADOWFAX_SPH) &&            \
+    !defined(MINIMAL_SPH) && !defined(PLANETARY_SPH) &&              \
+    !defined(HOPKINS_PU_SPH) && !defined(HOPKINS_PU_SPH_MONAGHAN) && \
+    !defined(ANARCHY_PU_SPH)
   struct part *p;
   for (size_t i = 0; i < count; ++i) {
     p = &parts[i];
@@ -141,14 +146,16 @@ void dump_indv_particle_fields(char *fileName, struct part *p) {
 #else
           p->density.div_v,
 #endif
-          hydro_get_comoving_entropy(p), hydro_get_comoving_internal_energy(p),
+          hydro_get_drifted_comoving_entropy(p),
+          hydro_get_drifted_comoving_internal_energy(p),
           hydro_get_comoving_pressure(p), hydro_get_comoving_soundspeed(p),
           p->a_hydro[0], p->a_hydro[1], p->a_hydro[2], p->force.h_dt,
 #if defined(GADGET2_SPH)
           p->force.v_sig, p->entropy_dt, 0.f
 #elif defined(DEFAULT_SPH)
           p->force.v_sig, 0.f, p->force.u_dt
-#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH)
+#elif defined(MINIMAL_SPH) || defined(HOPKINS_PU_SPH) || \
+    defined(HOPKINS_PU_SPH_MONAGHAN) || defined(ANARCHY_PU_SPH)
           p->force.v_sig, 0.f, p->u_dt
 #else
           0.f, 0.f, 0.f
@@ -191,10 +198,10 @@ int check_results(struct part serial_test_part, struct part *serial_parts,
                   struct part vec_test_part, struct part *vec_parts,
                   int count) {
   int result = 0;
-  result += compare_particles(serial_test_part, vec_test_part, ACC_THRESHOLD);
+  result += compare_particles(&serial_test_part, &vec_test_part, ACC_THRESHOLD);
 
   for (int i = 0; i < count; i++)
-    result += compare_particles(serial_parts[i], vec_parts[i], ACC_THRESHOLD);
+    result += compare_particles(&serial_parts[i], &vec_parts[i], ACC_THRESHOLD);
 
   return result;
 }
@@ -552,7 +559,8 @@ void test_force_interactions(struct part test_part, struct part *parts,
       vizq[i] = pi_vec.v[2];
       rhoiq[i] = pi_vec.rho;
       grad_hiq[i] = pi_vec.force.f;
-#if !defined(HOPKINS_PU_SPH)
+#if !defined(HOPKINS_PU_SPH) && !defined(HOPKINS_PU_SPH_MONAGHAN) && \
+    !defined(ANARCHY_PU_SPH)
       pOrhoi2q[i] = pi_vec.force.P_over_rho2;
 #endif
       balsaraiq[i] = pi_vec.force.balsara;
@@ -565,7 +573,8 @@ void test_force_interactions(struct part test_part, struct part *parts,
       vjzq[i] = pj_vec[i].v[2];
       rhojq[i] = pj_vec[i].rho;
       grad_hjq[i] = pj_vec[i].force.f;
-#if !defined(HOPKINS_PU_SPH)
+#if !defined(HOPKINS_PU_SPH) && !defined(HOPKINS_PU_SPH_MONAGHAN) && \
+    !defined(ANARCHY_PU_SPH)
       pOrhoj2q[i] = pj_vec[i].force.P_over_rho2;
 #endif
       balsarajq[i] = pj_vec[i].force.balsara;
@@ -647,7 +656,8 @@ void test_force_interactions(struct part test_part, struct part *parts,
     VEC_HADD(a_hydro_zSum, piq[0]->a_hydro[2]);
     VEC_HADD(h_dtSum, piq[0]->force.h_dt);
     VEC_HMAX(v_sigSum, piq[0]->force.v_sig);
-#if !defined(HOPKINS_PU_SPH)
+#if !defined(HOPKINS_PU_SPH) && !defined(HOPKINS_PU_SPH_MONAGHAN) && \
+    !defined(ANARCHY_PU_SPH)
     VEC_HADD(entropy_dtSum, piq[0]->entropy_dt);
 #endif
 
diff --git a/tests/testLogger.c b/tests/testLogger.c
index ee933500ab585d286c9dea7370b0d208573ca7d2..c5be0d7cc18742bdc2fa6167462579c45fd43e92 100644
--- a/tests/testLogger.c
+++ b/tests/testLogger.c
@@ -20,7 +20,8 @@
 /* Config parameters. */
 #include "../config.h"
 
-#ifdef HAVE_POSIX_FALLOCATE /* Are we on a sensible platform? */
+#if defined(HAVE_POSIX_FALLOCATE) && \
+    defined(WITH_LOGGER) /* Are we on a sensible platform? */
 
 /* Some standard headers. */
 #include <stdio.h>
@@ -31,7 +32,8 @@
 /* Local headers. */
 #include "swift.h"
 
-void test_log_parts(struct dump *d) {
+void test_log_parts(struct logger *log) {
+  struct dump *d = log->dump;
 
   /* Write several copies of a part to the dump. */
   struct part p;
@@ -43,22 +45,22 @@ void test_log_parts(struct dump *d) {
   size_t offset = d->count;
 
   /* Write the full part. */
-  logger_log_part(&p,
+  logger_log_part(log, &p,
                   logger_mask_x | logger_mask_v | logger_mask_a |
                       logger_mask_u | logger_mask_h | logger_mask_rho |
                       logger_mask_consts,
-                  &offset, d);
+                  &offset);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Write only the position. */
   p.x[0] = 2.0;
-  logger_log_part(&p, logger_mask_x, &offset, d);
+  logger_log_part(log, &p, logger_mask_x, &offset);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Write the position and velocity. */
   p.x[0] = 3.0;
   p.v[0] = 0.3;
-  logger_log_part(&p, logger_mask_x | logger_mask_v, &offset, d);
+  logger_log_part(log, &p, logger_mask_x | logger_mask_v, &offset);
   printf("Wrote part at offset %#016zx.\n", offset);
 
   /* Recover the last part from the dump. */
@@ -101,7 +103,8 @@ void test_log_parts(struct dump *d) {
   }
 }
 
-void test_log_gparts(struct dump *d) {
+void test_log_gparts(struct logger *log) {
+  struct dump *d = log->dump;
 
   /* Write several copies of a part to the dump. */
   struct gpart p;
@@ -113,21 +116,21 @@ void test_log_gparts(struct dump *d) {
   size_t offset = d->count;
 
   /* Write the full part. */
-  logger_log_gpart(&p,
+  logger_log_gpart(log, &p,
                    logger_mask_x | logger_mask_v | logger_mask_a |
                        logger_mask_h | logger_mask_consts,
-                   &offset, d);
+                   &offset);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Write only the position. */
   p.x[0] = 2.0;
-  logger_log_gpart(&p, logger_mask_x, &offset, d);
+  logger_log_gpart(log, &p, logger_mask_x, &offset);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Write the position and velocity. */
   p.x[0] = 3.0;
   p.v_full[0] = 0.3;
-  logger_log_gpart(&p, logger_mask_x | logger_mask_v, &offset, d);
+  logger_log_gpart(log, &p, logger_mask_x | logger_mask_v, &offset);
   printf("Wrote gpart at offset %#016zx.\n", offset);
 
   /* Recover the last part from the dump. */
@@ -170,82 +173,100 @@ void test_log_gparts(struct dump *d) {
   }
 }
 
-void test_log_timestamps(struct dump *d) {
+void test_log_timestamps(struct logger *log) {
+  struct dump *d = log->dump;
 
   /* The timestamp to log. */
   unsigned long long int t = 10;
+  double time = 0.1;
 
   /* Start with an offset at the end of the dump. */
   size_t offset = d->count;
 
   /* Log three consecutive timestamps. */
-  logger_log_timestamp(t, &offset, d);
+  logger_log_timestamp(log, t, time, &offset);
   printf("Logged timestamp %020llu at offset %#016zx.\n", t, offset);
   t += 10;
-  logger_log_timestamp(t, &offset, d);
+  time = 0.2;
+  logger_log_timestamp(log, t, time, &offset);
   printf("Logged timestamp %020llu at offset %#016zx.\n", t, offset);
   t += 10;
-  logger_log_timestamp(t, &offset, d);
+  time = 0.3;
+  logger_log_timestamp(log, t, time, &offset);
   printf("Logged timestamp %020llu at offset %#016zx.\n", t, offset);
 
   /* Recover the three timestamps. */
   size_t offset_old = offset;
   t = 0;
-  int mask = logger_read_timestamp(&t, &offset, (const char *)d->data);
+  time = 0;
+  int mask = logger_read_timestamp(&t, &time, &offset, (const char *)d->data);
   printf("Recovered timestamp %020llu at offset %#016zx with mask %#04x.\n", t,
          offset_old, mask);
   if (t != 30) {
     printf("FAIL: could not recover correct timestamp.\n");
     abort();
   }
+  if (time != 0.3) {
+    printf("FAIL: could not recover correct time %g.\n", time);
+    abort();
+  }
 
   offset_old = offset;
   t = 0;
-  mask = logger_read_timestamp(&t, &offset, (const char *)d->data);
+  time = 0;
+  mask = logger_read_timestamp(&t, &time, &offset, (const char *)d->data);
   printf("Recovered timestamp %020llu at offset %#016zx with mask %#04x.\n", t,
          offset_old, mask);
   if (t != 20) {
     printf("FAIL: could not recover correct timestamp.\n");
     abort();
   }
+  if (time != 0.2) {
+    printf("FAIL: could not recover correct time.\n");
+    abort();
+  }
 
   offset_old = offset;
   t = 0;
-  mask = logger_read_timestamp(&t, &offset, (const char *)d->data);
+  time = 0;
+  mask = logger_read_timestamp(&t, &time, &offset, (const char *)d->data);
   printf("Recovered timestamp %020llu at offset %#016zx with mask %#04x.\n", t,
          offset_old, mask);
   if (t != 10) {
     printf("FAIL: could not recover correct timestamp.\n");
     abort();
   }
+  if (time != 0.1) {
+    printf("FAIL: could not recover correct time.\n");
+    abort();
+  }
 }
 
 int main(int argc, char *argv[]) {
 
-  /* Some constants. */
-  char filename[256];
-  const int now = time(NULL);
-  sprintf(filename, "/tmp/SWIFT_logger_test_%d.out", now);
-
-  /* Prepare a dump. */
-  struct dump d;
-  dump_init(&d, filename, 1024 * 1024);
+  /* Prepare a logger. */
+  struct logger log;
+  struct swift_params params;
+  parser_read_file("logger.yml", &params);
+  logger_init(&log, &params);
 
   /* Test writing/reading parts. */
-  test_log_parts(&d);
+  test_log_parts(&log);
 
   /* Test writing/reading gparts. */
-  test_log_gparts(&d);
+  test_log_gparts(&log);
 
   /* Test writing/reading timestamps. */
-  test_log_timestamps(&d);
-
-  /* Finalize the dump. */
-  dump_close(&d);
+  test_log_timestamps(&log);
 
   /* Be clean */
+  char filename[256];
+  sprintf(filename, "%s.dump", log.base_name);
   remove(filename);
 
+  /* Clean the logger. */
+  logger_clean(&log);
+
   /* Return a happy number. */
   return 0;
 }
diff --git a/tests/testMatrixInversion.c b/tests/testMatrixInversion.c
index a15e0dab7ec793cf4a914b6eb89c63863ab24fb0..8cd0f4e272a6b7e587619117e1aa143409976c51 100644
--- a/tests/testMatrixInversion.c
+++ b/tests/testMatrixInversion.c
@@ -16,9 +16,13 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
 
+/* Some standard headers. */
 #include <stdlib.h>
 #include <string.h>
+
+/* Local headers */
 #include "const.h"
 #include "dimension.h"
 #include "error.h"
diff --git a/tests/testOutputList.c b/tests/testOutputList.c
index b7df197405ee095cf9bf0a63e8cf7f00585f269f..fd69ef91389758adf87aa48ab983a6cfbd6a89a9 100644
--- a/tests/testOutputList.c
+++ b/tests/testOutputList.c
@@ -17,10 +17,9 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
 
-#include <math.h>
-#include <stdio.h>
-#include <string.h>
+/* Includes. */
 #include "swift.h"
 
 #define Ntest 3
@@ -36,9 +35,9 @@ const double time_values[Ntest] = {
 
 /* Expected values from file */
 const double a_values[Ntest] = {
-    0.5,
-    0.1,
     0.01,
+    0.1,
+    0.5,
 };
 
 void test_no_cosmo(struct engine *e, char *name, int with_assert) {
@@ -62,7 +61,7 @@ void test_no_cosmo(struct engine *e, char *name, int with_assert) {
   for (int i = 0; i < Ntest; i++) {
     /* Test last value */
     if (with_assert) {
-      assert(abs(output_time - time_values[i]) < tol);
+      assert(fabs(output_time - time_values[i]) < tol);
     }
 
     /* Set current time */
@@ -76,7 +75,7 @@ void test_no_cosmo(struct engine *e, char *name, int with_assert) {
     output_time = (double)(ti_next * e->time_base) + e->time_begin;
   }
 
-  output_list_clean(list);
+  output_list_clean(&list);
 };
 
 void test_cosmo(struct engine *e, char *name, int with_assert) {
@@ -98,7 +97,7 @@ void test_cosmo(struct engine *e, char *name, int with_assert) {
   for (int i = 0; i < Ntest; i++) {
     /* Test last value */
     if (with_assert) {
-      assert(abs(output_time - a_values[i]) < tol);
+      assert(fabs(output_time - a_values[i]) < tol);
     }
 
     /* Set current time */
@@ -112,7 +111,7 @@ void test_cosmo(struct engine *e, char *name, int with_assert) {
     output_time = (double)exp(ti_next * e->time_base) * e->cosmology->a_begin;
   }
 
-  output_list_clean(list);
+  output_list_clean(&list);
 };
 
 int main(int argc, char *argv[]) {
@@ -151,6 +150,8 @@ int main(int argc, char *argv[]) {
   test_cosmo(&e, "ScaleFactor", with_assert);
   test_cosmo(&e, "Time", without_assert);
 
+  cosmology_clean(&cosmo);
+
   /* Write message and leave */
   message("Test done");
   return 0;
diff --git a/tests/testParser.c b/tests/testParser.c
index 3944e86fa19a1f623623383eabefe1094bf5addf..84ce70ff44fad0482573c740d5a174285655c08d 100644
--- a/tests/testParser.c
+++ b/tests/testParser.c
@@ -114,6 +114,8 @@ int main(int argc, char *argv[]) {
   int haveoptwords1 = parser_get_opt_param_string_array(
       &param_file, "Simulation:optwords", &nvar_result, &var_result, noptwords,
       optwords);
+  parser_free_param_string_array(nvar_result, var_result);
+
   /* Check if we can read it again */
   int haveoptwords2 = parser_get_opt_param_string_array(
       &param_file, "Simulation:optwords", &nvar_result, &var_result, noptwords,
diff --git a/tests/testPeriodicBC.c b/tests/testPeriodicBC.c
index de30b1af9ac8595cb081eb0702e9a7e7da13a162..be83f20a58b17f9a5fdcf967cda9a678aab5b8a9 100644
--- a/tests/testPeriodicBC.c
+++ b/tests/testPeriodicBC.c
@@ -81,16 +81,16 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   struct cell *cell = (struct cell *)malloc(sizeof(struct cell));
   bzero(cell, sizeof(struct cell));
 
-  if (posix_memalign((void **)&cell->parts, part_align,
+  if (posix_memalign((void **)&cell->hydro.parts, part_align,
                      count * sizeof(struct part)) != 0) {
     error("couldn't allocate particles, no. of particles: %d", (int)count);
   }
-  bzero(cell->parts, count * sizeof(struct part));
+  bzero(cell->hydro.parts, count * sizeof(struct part));
 
   float h_max = 0.f;
 
   /* Construct the parts */
-  struct part *part = cell->parts;
+  struct part *part = cell->hydro.parts;
   for (size_t x = 0; x < n; ++x) {
     for (size_t y = 0; y < n; ++y) {
       for (size_t z = 0; z < n; ++z) {
@@ -161,10 +161,10 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
 
   /* Cell properties */
   cell->split = 0;
-  cell->h_max = h_max;
-  cell->count = count;
-  cell->dx_max_part = 0.;
-  cell->dx_max_sort = 0.;
+  cell->hydro.h_max = h_max;
+  cell->hydro.count = count;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
   cell->width[0] = size;
   cell->width[1] = size;
   cell->width[2] = size;
@@ -172,23 +172,23 @@ struct cell *make_cell(size_t n, double *offset, double size, double h,
   cell->loc[1] = offset[1];
   cell->loc[2] = offset[2];
 
-  cell->ti_old_part = 8;
-  cell->ti_hydro_end_min = 8;
-  cell->ti_hydro_end_max = 8;
+  cell->hydro.ti_old_part = 8;
+  cell->hydro.ti_end_min = 8;
+  cell->hydro.ti_end_max = 8;
   cell->nodeID = NODE_ID;
 
-  shuffle_particles(cell->parts, cell->count);
+  shuffle_particles(cell->hydro.parts, cell->hydro.count);
 
-  cell->sorted = 0;
-  for (int k = 0; k < 13; k++) cell->sort[k] = NULL;
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
 
   return cell;
 }
 
 void clean_up(struct cell *ci) {
-  free(ci->parts);
+  free(ci->hydro.parts);
   for (int k = 0; k < 13; k++)
-    if (ci->sort[k] != NULL) free(ci->sort[k]);
+    if (ci->hydro.sort[k] != NULL) free(ci->hydro.sort[k]);
   free(ci);
 }
 
@@ -196,8 +196,8 @@ void clean_up(struct cell *ci) {
  * @brief Initializes all particles field to be ready for a density calculation
  */
 void zero_particle_fields(struct cell *c) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_init_part(&c->parts[pid], NULL);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_init_part(&c->hydro.parts[pid], NULL);
   }
 }
 
@@ -205,8 +205,8 @@ void zero_particle_fields(struct cell *c) {
  * @brief Ends the loop by adding the appropriate coefficients
  */
 void end_calculation(struct cell *c, const struct cosmology *cosmo) {
-  for (int pid = 0; pid < c->count; pid++) {
-    hydro_end_density(&c->parts[pid], cosmo);
+  for (int pid = 0; pid < c->hydro.count; pid++) {
+    hydro_end_density(&c->hydro.parts[pid], cosmo);
   }
 }
 
@@ -228,27 +228,27 @@ void dump_particle_fields(char *fileName, struct cell *main_cell, int i, int j,
           i, j, k);
 
   /* Write main cell */
-  for (int pid = 0; pid < main_cell->count; pid++) {
+  for (int pid = 0; pid < main_cell->hydro.count; pid++) {
     fprintf(file,
             "%6llu %10f %10f %10f %10f %10f %10f %13e %13e %13e %13e %13e "
             "%13e %13e %13e\n",
-            main_cell->parts[pid].id, main_cell->parts[pid].x[0],
-            main_cell->parts[pid].x[1], main_cell->parts[pid].x[2],
-            main_cell->parts[pid].v[0], main_cell->parts[pid].v[1],
-            main_cell->parts[pid].v[2],
-            hydro_get_comoving_density(&main_cell->parts[pid]),
+            main_cell->hydro.parts[pid].id, main_cell->hydro.parts[pid].x[0],
+            main_cell->hydro.parts[pid].x[1], main_cell->hydro.parts[pid].x[2],
+            main_cell->hydro.parts[pid].v[0], main_cell->hydro.parts[pid].v[1],
+            main_cell->hydro.parts[pid].v[2],
+            hydro_get_comoving_density(&main_cell->hydro.parts[pid]),
 #if defined(GIZMO_MFV_SPH) || defined(SHADOWFAX_SPH)
             0.f,
 #else
-            main_cell->parts[pid].density.rho_dh,
+            main_cell->hydro.parts[pid].density.rho_dh,
 #endif
-            main_cell->parts[pid].density.wcount,
-            main_cell->parts[pid].density.wcount_dh,
+            main_cell->hydro.parts[pid].density.wcount,
+            main_cell->hydro.parts[pid].density.wcount_dh,
 #if defined(GADGET2_SPH) || defined(DEFAULT_SPH) || defined(HOPKINS_PE_SPH)
-            main_cell->parts[pid].density.div_v,
-            main_cell->parts[pid].density.rot_v[0],
-            main_cell->parts[pid].density.rot_v[1],
-            main_cell->parts[pid].density.rot_v[2]
+            main_cell->hydro.parts[pid].density.div_v,
+            main_cell->hydro.parts[pid].density.rot_v[0],
+            main_cell->hydro.parts[pid].density.rot_v[1],
+            main_cell->hydro.parts[pid].density.rot_v[2]
 #else
             0., 0., 0., 0.
 #endif
@@ -273,7 +273,7 @@ int check_results(struct part *serial_parts, struct part *vec_parts, int count,
   int result = 0;
 
   for (int i = 0; i < count; i++)
-    result += compare_particles(serial_parts[i], vec_parts[i], threshold);
+    result += compare_particles(&serial_parts[i], &vec_parts[i], threshold);
 
   return result;
 }
@@ -505,8 +505,8 @@ int main(int argc, char *argv[]) {
 
         runner_do_drift_part(&runner, cells[i * (dim * dim) + j * dim + k], 0);
 
-        runner_do_sort(&runner, cells[i * (dim * dim) + j * dim + k], 0x1FFF, 0,
-                       0);
+        runner_do_hydro_sort(&runner, cells[i * (dim * dim) + j * dim + k],
+                             0x1FFF, 0, 0);
       }
     }
   }
diff --git a/tests/testPotentialPair.c b/tests/testPotentialPair.c
index 380d1fc979f129d46b08306d759eae3ff2739195..064c86d42f8df907d1ffaaab164b6a2f8b534b19 100644
--- a/tests/testPotentialPair.c
+++ b/tests/testPotentialPair.c
@@ -106,6 +106,7 @@ int main(int argc, char *argv[]) {
   e.time = 0.1f;
   e.ti_current = 8;
   e.time_base = 1e-10;
+  e.nodeID = 0;
 
   struct space s;
   s.periodic = 0;
@@ -141,57 +142,59 @@ int main(int argc, char *argv[]) {
   bzero(&ci, sizeof(struct cell));
   bzero(&cj, sizeof(struct cell));
 
+  ci.nodeID = 0;
   ci.width[0] = 1.;
   ci.width[1] = 1.;
   ci.width[2] = 1.;
   ci.loc[0] = 0.;
   ci.loc[1] = 0.;
   ci.loc[2] = 0.;
-  ci.gcount = 1;
-  ci.ti_old_gpart = 8;
-  ci.ti_old_multipole = 8;
-  ci.ti_gravity_end_min = 8;
-  ci.ti_gravity_end_max = 8;
+  ci.grav.count = 1;
+  ci.grav.ti_old_part = 8;
+  ci.grav.ti_old_multipole = 8;
+  ci.grav.ti_end_min = 8;
+  ci.grav.ti_end_max = 8;
 
+  cj.nodeID = 0;
   cj.width[0] = 1.;
   cj.width[1] = 1.;
   cj.width[2] = 1.;
   cj.loc[0] = 1.;
   cj.loc[1] = 0.;
   cj.loc[2] = 0.;
-  cj.gcount = num_tests;
-  cj.ti_old_gpart = 8;
-  cj.ti_old_multipole = 8;
-  cj.ti_gravity_end_min = 8;
-  cj.ti_gravity_end_max = 8;
+  cj.grav.count = num_tests;
+  cj.grav.ti_old_part = 8;
+  cj.grav.ti_old_multipole = 8;
+  cj.grav.ti_end_min = 8;
+  cj.grav.ti_end_max = 8;
 
   /* Allocate multipoles */
-  ci.multipole =
+  ci.grav.multipole =
       (struct gravity_tensors *)malloc(sizeof(struct gravity_tensors));
-  cj.multipole =
+  cj.grav.multipole =
       (struct gravity_tensors *)malloc(sizeof(struct gravity_tensors));
-  bzero(ci.multipole, sizeof(struct gravity_tensors));
-  bzero(cj.multipole, sizeof(struct gravity_tensors));
+  bzero(ci.grav.multipole, sizeof(struct gravity_tensors));
+  bzero(cj.grav.multipole, sizeof(struct gravity_tensors));
 
   /* Set the multipoles */
-  ci.multipole->r_max = 0.1;
-  cj.multipole->r_max = 0.1;
+  ci.grav.multipole->r_max = 0.1;
+  cj.grav.multipole->r_max = 0.1;
 
   /* Allocate the particles */
-  if (posix_memalign((void **)&ci.gparts, gpart_align,
-                     ci.gcount * sizeof(struct gpart)) != 0)
+  if (posix_memalign((void **)&ci.grav.parts, gpart_align,
+                     ci.grav.count * sizeof(struct gpart)) != 0)
     error("Error allocating gparts for cell ci");
-  bzero(ci.gparts, ci.gcount * sizeof(struct gpart));
+  bzero(ci.grav.parts, ci.grav.count * sizeof(struct gpart));
 
-  if (posix_memalign((void **)&cj.gparts, gpart_align,
-                     cj.gcount * sizeof(struct gpart)) != 0)
+  if (posix_memalign((void **)&cj.grav.parts, gpart_align,
+                     cj.grav.count * sizeof(struct gpart)) != 0)
     error("Error allocating gparts for cell ci");
-  bzero(cj.gparts, cj.gcount * sizeof(struct gpart));
+  bzero(cj.grav.parts, cj.grav.count * sizeof(struct gpart));
 
   /* Create the mass-less test particles */
   for (int n = 0; n < num_tests; ++n) {
 
-    struct gpart *gp = &cj.gparts[n];
+    struct gpart *gp = &cj.grav.parts[n];
 
     gp->x[0] = 1. + (n + 1) / ((double)num_tests);
     gp->x[1] = 0.5;
@@ -202,6 +205,7 @@ int main(int argc, char *argv[]) {
     gp->id_or_neg_offset = n + 1;
 #ifdef SWIFT_DEBUG_CHECKS
     gp->ti_drift = 8;
+    gp->initialised = 1;
 #endif
   }
 
@@ -210,15 +214,16 @@ int main(int argc, char *argv[]) {
   /***********************************************/
 
   /* Create the massive particle */
-  ci.gparts[0].x[0] = 0.;
-  ci.gparts[0].x[1] = 0.5;
-  ci.gparts[0].x[2] = 0.5;
-  ci.gparts[0].mass = 1.;
-  ci.gparts[0].time_bin = 1;
-  ci.gparts[0].type = swift_type_dark_matter;
-  ci.gparts[0].id_or_neg_offset = 1;
+  ci.grav.parts[0].x[0] = 0.;
+  ci.grav.parts[0].x[1] = 0.5;
+  ci.grav.parts[0].x[2] = 0.5;
+  ci.grav.parts[0].mass = 1.;
+  ci.grav.parts[0].time_bin = 1;
+  ci.grav.parts[0].type = swift_type_dark_matter;
+  ci.grav.parts[0].id_or_neg_offset = 1;
 #ifdef SWIFT_DEBUG_CHECKS
-  ci.gparts[0].ti_drift = 8;
+  ci.grav.parts[0].ti_drift = 8;
+  ci.grav.parts[0].initialised = 1;
 #endif
 
   /* Now compute the forces */
@@ -226,28 +231,29 @@ int main(int argc, char *argv[]) {
 
   /* Verify everything */
   for (int n = 0; n < num_tests; ++n) {
-    const struct gpart *gp = &cj.gparts[n];
-    const struct gpart *gp2 = &ci.gparts[0];
+    const struct gpart *gp = &cj.grav.parts[n];
+    const struct gpart *gp2 = &ci.grav.parts[0];
     const double epsilon = gravity_get_softening(gp, &props);
 
 #if defined(POTENTIAL_GRAVITY)
     double pot_true =
-        potential(ci.gparts[0].mass, gp->x[0] - gp2->x[0], epsilon, rlr);
+        potential(ci.grav.parts[0].mass, gp->x[0] - gp2->x[0], epsilon, rlr);
     check_value(gp->potential, pot_true, "potential");
 #endif
 
     double acc_true =
-        acceleration(ci.gparts[0].mass, gp->x[0] - gp2->x[0], epsilon, rlr);
-    check_value(gp->a_grav[0], acc_true, "acceleration");
+        acceleration(ci.grav.parts[0].mass, gp->x[0] - gp2->x[0], epsilon, rlr);
 
     /* message("x=%e f=%e f_true=%e pot=%e pot_true=%e", gp->x[0] - gp2->x[0],
-     *         gp->a_grav[0], acc_true, gp->potential, pot_true); */
+       gp->a_grav[0], acc_true, gp->potential, pot_true); */
+
+    check_value(gp->a_grav[0], acc_true, "acceleration");
   }
 
   message("\n\t\t P-P interactions all good\n");
 
   /* Reset the accelerations */
-  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.gparts[n]);
+  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.grav.parts[n]);
 
   /**********************************/
   /* Test the basic PM interactions */
@@ -256,22 +262,22 @@ int main(int argc, char *argv[]) {
   /* Set an opening angle that allows P-M interactions */
   props.theta_crit2 = 1.;
 
-  ci.gparts[0].mass = 0.;
-  ci.multipole->CoM[0] = 0.;
-  ci.multipole->CoM[1] = 0.5;
-  ci.multipole->CoM[2] = 0.5;
+  ci.grav.parts[0].mass = 0.;
+  ci.grav.multipole->CoM[0] = 0.;
+  ci.grav.multipole->CoM[1] = 0.5;
+  ci.grav.multipole->CoM[2] = 0.5;
 
-  bzero(&ci.multipole->m_pole, sizeof(struct multipole));
-  bzero(&cj.multipole->m_pole, sizeof(struct multipole));
-  ci.multipole->m_pole.M_000 = 1.;
+  bzero(&ci.grav.multipole->m_pole, sizeof(struct multipole));
+  bzero(&cj.grav.multipole->m_pole, sizeof(struct multipole));
+  ci.grav.multipole->m_pole.M_000 = 1.;
 
   /* Now compute the forces */
   runner_dopair_grav_pp(&r, &ci, &cj, 1, 1);
 
   /* Verify everything */
   for (int n = 0; n < num_tests; ++n) {
-    const struct gpart *gp = &cj.gparts[n];
-    const struct gravity_tensors *mpole = ci.multipole;
+    const struct gpart *gp = &cj.grav.parts[n];
+    const struct gravity_tensors *mpole = ci.grav.multipole;
     const double epsilon = gravity_get_softening(gp, &props);
 
 #if defined(POTENTIAL_GRAVITY)
@@ -293,7 +299,7 @@ int main(int argc, char *argv[]) {
 #ifndef GADGET2_LONG_RANGE_CORRECTION
 
   /* Reset the accelerations */
-  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.gparts[n]);
+  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.grav.parts[n]);
 
   /***************************************/
   /* Test the truncated PM interactions  */
@@ -310,8 +316,8 @@ int main(int argc, char *argv[]) {
 
   /* Verify everything */
   for (int n = 0; n < num_tests; ++n) {
-    const struct gpart *gp = &cj.gparts[n];
-    const struct gravity_tensors *mpole = ci.multipole;
+    const struct gpart *gp = &cj.grav.parts[n];
+    const struct gravity_tensors *mpole = ci.grav.multipole;
     const double epsilon = gravity_get_softening(gp, &props);
 
 #if defined(POTENTIAL_GRAVITY)
@@ -338,57 +344,58 @@ int main(int argc, char *argv[]) {
   /************************************************/
 
   /* Reset the accelerations */
-  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.gparts[n]);
+  for (int n = 0; n < num_tests; ++n) gravity_init_gpart(&cj.grav.parts[n]);
 
 #if SELF_GRAVITY_MULTIPOLE_ORDER >= 3
 
   /* Let's make ci more interesting */
-  free(ci.gparts);
-  ci.gcount = 8;
-  if (posix_memalign((void **)&ci.gparts, gpart_align,
-                     ci.gcount * sizeof(struct gpart)) != 0)
+  free(ci.grav.parts);
+  ci.grav.count = 8;
+  if (posix_memalign((void **)&ci.grav.parts, gpart_align,
+                     ci.grav.count * sizeof(struct gpart)) != 0)
     error("Error allocating gparts for cell ci");
-  bzero(ci.gparts, ci.gcount * sizeof(struct gpart));
+  bzero(ci.grav.parts, ci.grav.count * sizeof(struct gpart));
 
   /* Place particles on a simple cube of side-length 0.2 */
   for (int n = 0; n < 8; ++n) {
     if (n & 1)
-      ci.gparts[n].x[0] = 0.0 - 0.1;
+      ci.grav.parts[n].x[0] = 0.0 - 0.1;
     else
-      ci.gparts[n].x[0] = 0.0 + 0.1;
+      ci.grav.parts[n].x[0] = 0.0 + 0.1;
 
     if (n & 2)
-      ci.gparts[n].x[1] = 0.5 - 0.1;
+      ci.grav.parts[n].x[1] = 0.5 - 0.1;
     else
-      ci.gparts[n].x[1] = 0.5 + 0.1;
+      ci.grav.parts[n].x[1] = 0.5 + 0.1;
 
     if (n & 2)
-      ci.gparts[n].x[2] = 0.5 - 0.1;
+      ci.grav.parts[n].x[2] = 0.5 - 0.1;
     else
-      ci.gparts[n].x[2] = 0.5 + 0.1;
+      ci.grav.parts[n].x[2] = 0.5 + 0.1;
 
-    ci.gparts[n].mass = 1. / 8.;
+    ci.grav.parts[n].mass = 1. / 8.;
 
-    ci.gparts[n].time_bin = 1;
-    ci.gparts[n].type = swift_type_dark_matter;
-    ci.gparts[n].id_or_neg_offset = 1;
+    ci.grav.parts[n].time_bin = 1;
+    ci.grav.parts[n].type = swift_type_dark_matter;
+    ci.grav.parts[n].id_or_neg_offset = 1;
 #ifdef SWIFT_DEBUG_CHECKS
-    ci.gparts[n].ti_drift = 8;
+    ci.grav.parts[n].ti_drift = 8;
+    ci.grav.parts[n].initialised = 1;
 #endif
   }
 
   /* Now let's make a multipole out of it. */
-  gravity_reset(ci.multipole);
-  gravity_P2M(ci.multipole, ci.gparts, ci.gcount);
+  gravity_reset(ci.grav.multipole);
+  gravity_P2M(ci.grav.multipole, ci.grav.parts, ci.grav.count);
 
-  gravity_multipole_print(&ci.multipole->m_pole);
+  gravity_multipole_print(&ci.grav.multipole->m_pole);
 
   /* Compute the forces */
   runner_dopair_grav_pp(&r, &ci, &cj, 1, 1);
 
   /* Verify everything */
   for (int n = 0; n < num_tests; ++n) {
-    const struct gpart *gp = &cj.gparts[n];
+    const struct gpart *gp = &cj.grav.parts[n];
 
 #if defined(POTENTIAL_GRAVITY)
     double pot_true = 0;
@@ -396,7 +403,7 @@ int main(int argc, char *argv[]) {
     double acc_true[3] = {0., 0., 0.};
 
     for (int i = 0; i < 8; ++i) {
-      const struct gpart *gp2 = &ci.gparts[i];
+      const struct gpart *gp2 = &ci.grav.parts[i];
       const double epsilon = gravity_get_softening(gp, &props);
 
       const double dx[3] = {gp2->x[0] - gp->x[0], gp2->x[1] - gp->x[1],
@@ -417,7 +424,7 @@ int main(int argc, char *argv[]) {
 #endif
     check_value_backend(gp->a_grav[0], acc_true[0], "acceleration", 1e-2, 1e-6);
 
-    /* const struct gravity_tensors *mpole = ci.multipole; */
+    /* const struct gravity_tensors *mpole = ci.grav.multipole; */
     /* message("x=%e f=%e f_true=%e pot=%e pot_true=%e %e %e", */
     /*         gp->x[0] - mpole->CoM[0], gp->a_grav[0], acc_true[0],
      * gp->potential, */
@@ -428,9 +435,14 @@ int main(int argc, char *argv[]) {
 
 #endif
 
-  free(ci.multipole);
-  free(cj.multipole);
-  free(ci.gparts);
-  free(cj.gparts);
+  free(ci.grav.multipole);
+  free(cj.grav.multipole);
+  free(ci.grav.parts);
+  free(cj.grav.parts);
+
+  /* Clean up the caches */
+  gravity_cache_clean(&r.ci_gravity_cache);
+  gravity_cache_clean(&r.cj_gravity_cache);
+
   return 0;
 }
diff --git a/tests/testPotentialSelf.c b/tests/testPotentialSelf.c
index 6bf5dbd405830f1ba1c58d8627606a67111f5fb0..10eb499570a591daaf0de2e011f2346077905e8e 100644
--- a/tests/testPotentialSelf.c
+++ b/tests/testPotentialSelf.c
@@ -137,32 +137,33 @@ int main(int argc, char *argv[]) {
   c.loc[0] = 0.;
   c.loc[1] = 0.;
   c.loc[2] = 0.;
-  c.gcount = 1 + num_tests;
-  c.ti_old_gpart = 8;
-  c.ti_gravity_end_min = 8;
-  c.ti_gravity_end_max = 8;
+  c.grav.count = 1 + num_tests;
+  c.grav.ti_old_part = 8;
+  c.grav.ti_end_min = 8;
+  c.grav.ti_end_max = 8;
 
-  if (posix_memalign((void **)&c.gparts, gpart_align,
-                     c.gcount * sizeof(struct gpart)) != 0)
+  if (posix_memalign((void **)&c.grav.parts, gpart_align,
+                     c.grav.count * sizeof(struct gpart)) != 0)
     error("Impossible to allocate memory for the gparts.");
-  bzero(c.gparts, c.gcount * sizeof(struct gpart));
+  bzero(c.grav.parts, c.grav.count * sizeof(struct gpart));
 
   /* Create the massive particle */
-  c.gparts[0].x[0] = 0.;
-  c.gparts[0].x[1] = 0.5;
-  c.gparts[0].x[2] = 0.5;
-  c.gparts[0].mass = 1.;
-  c.gparts[0].time_bin = 1;
-  c.gparts[0].type = swift_type_dark_matter;
-  c.gparts[0].id_or_neg_offset = 1;
+  c.grav.parts[0].x[0] = 0.;
+  c.grav.parts[0].x[1] = 0.5;
+  c.grav.parts[0].x[2] = 0.5;
+  c.grav.parts[0].mass = 1.;
+  c.grav.parts[0].time_bin = 1;
+  c.grav.parts[0].type = swift_type_dark_matter;
+  c.grav.parts[0].id_or_neg_offset = 1;
 #ifdef SWIFT_DEBUG_CHECKS
-  c.gparts[0].ti_drift = 8;
+  c.grav.parts[0].ti_drift = 8;
+  c.grav.parts[0].initialised = 1;
 #endif
 
   /* Create the mass-less particles */
   for (int n = 1; n < num_tests + 1; ++n) {
 
-    struct gpart *gp = &c.gparts[n];
+    struct gpart *gp = &c.grav.parts[n];
 
     gp->x[0] = n / ((double)num_tests);
     gp->x[1] = 0.5;
@@ -173,6 +174,7 @@ int main(int argc, char *argv[]) {
     gp->id_or_neg_offset = n + 1;
 #ifdef SWIFT_DEBUG_CHECKS
     gp->ti_drift = 8;
+    gp->initialised = 1;
 #endif
   }
 
@@ -181,21 +183,27 @@ int main(int argc, char *argv[]) {
 
   /* Verify everything */
   for (int n = 1; n < num_tests + 1; ++n) {
-    const struct gpart *gp = &c.gparts[n];
+    const struct gpart *gp = &c.grav.parts[n];
 
     const double epsilon = gravity_get_softening(gp, &props);
 
 #if defined(POTENTIAL_GRAVITY)
-    double pot_true = potential(c.gparts[0].mass, gp->x[0], epsilon, rlr);
+    double pot_true = potential(c.grav.parts[0].mass, gp->x[0], epsilon, rlr);
     check_value(gp->potential, pot_true, "potential");
 #endif
 
-    double acc_true = acceleration(c.gparts[0].mass, gp->x[0], epsilon, rlr);
+    double acc_true =
+        acceleration(c.grav.parts[0].mass, gp->x[0], epsilon, rlr);
     check_value(gp->a_grav[0], acc_true, "acceleration");
 
     // message("x=%e f=%e f_true=%e", gp->x[0], gp->a_grav[0], acc_true);
   }
 
-  free(c.gparts);
+  free(c.grav.parts);
+
+  /* Clean up the caches */
+  gravity_cache_clean(&r.ci_gravity_cache);
+
+  /* All done! */
   return 0;
 }
diff --git a/tests/testRandom.c b/tests/testRandom.c
new file mode 100644
index 0000000000000000000000000000000000000000..8d8619d63e54f3030a2e7288402c4a78857361cb
--- /dev/null
+++ b/tests/testRandom.c
@@ -0,0 +1,307 @@
+/*******************************************************************************
+ * This file is part of SWIFT.
+ * Copyright (C) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
+ *               2019 Folkert Nobels    (nobels@strw.leidenuniv.nl)
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ ******************************************************************************/
+
+/* Config parameters. */
+#include "../config.h"
+
+#include <fenv.h>
+
+/* Local headers. */
+#include "swift.h"
+
+/**
+ * @brief Compute the Pearson correlation coefficient for two sets of numbers
+ *
+ * The pearson correlation coefficient between two sets of numbers can be
+ * calculated as:
+ *
+ *           <x*y> - <x>*<y>
+ * r_xy = ----------------------
+ *         (var(x) * var(y))^.5
+ *
+ * In the case that both sets are purely uncorrelated the value of the
+ * Pearson correlation function is expected to be close to 0. In the case that
+ * there is positive correlation r_xy > 0 and in the case of negative
+ * correlation, the function has r_xy < 0.
+ *
+ * @param mean1 average of first series of numbers
+ * @param mean2 average of second series of numbers
+ * @param total12 sum of x_i * y_i of both series of numbers
+ * @param var1 variance of the first series of numbers
+ * @param var2 variance of the second series of numbers
+ * @param counter number of elements in both series
+ * @return the Pearson correlation coefficient
+ * */
+double pearsonfunc(double mean1, double mean2, double total12, double var1,
+                   double var2, int counter) {
+
+  const double mean12 = total12 / (double)counter;
+  const double correlation = (mean12 - mean1 * mean2) / sqrt(var1 * var2);
+  return fabs(correlation);
+}
+
+/**
+ * @brief Test to check that the pseodo-random numbers in SWIFT are random
+ * enough for our purpose.
+ *
+ * The test initializes with the current time and than creates 20 ID numbers
+ * it runs the test using these 20 ID numbers. Using these 20 ID numbers it
+ * Checks 4 different things:
+ * 1. The mean and variance are correct for random numbers generated by this
+ *    ID number.
+ * 2. The random numbers from this ID number do not cause correlation in time.
+ *    Correlation is checked using the Pearson correlation coefficient which
+ *    should be sufficiently close to zero.
+ * 3. A small offset in ID number of 2, doesn't cause correlation between
+ *    the two sets of random numbers (again with the Pearson correlation
+ *    coefficient) and the mean and variance of this set is
+ *    also correct.
+ * 4. Different physical processes in random.h are also uncorrelated and
+ *    produce the correct mean and variance as expected. Again the correlation
+ *    is calculated using the Pearson correlation coefficient.
+ *
+ * More information about the Pearson correlation coefficient can be found in
+ * the function pearsonfunc above this function.
+ *
+ * @param argc Unused
+ * @param argv Unused
+ * @return 0 if everything is fine, 1 if random numbers are not random enough.
+ */
+int main(int argc, char* argv[]) {
+
+  /* Initialize CPU frequency, this also starts time. */
+  unsigned long long cpufreq = 0;
+  clocks_set_cpufreq(cpufreq);
+
+/* Choke on FPEs */
+#ifdef HAVE_FE_ENABLE_EXCEPT
+  feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+#endif
+
+  /* Get some randomness going */
+  const int seed = time(NULL);
+  message("Seed = %d", seed);
+  srand(seed);
+
+  /* Time-step size */
+  const int time_bin = 29;
+
+  /* Try a few different values for the ID */
+  for (int i = 0; i < 20; ++i) {
+
+    const long long id = rand() * (1LL << 31) + rand();
+    const integertime_t increment = (1LL << time_bin);
+    const long long idoffset = id + 2;
+
+    message("Testing id=%lld time_bin=%d", id, time_bin);
+
+    double total = 0., total2 = 0.;
+    int count = 0;
+
+    /* Pearson correlation variables for different times */
+    double sum_previous_current = 0.;
+    double previous = 0.;
+
+    /* Pearson correlation for two different IDs */
+    double pearsonIDs = 0.;
+    double totalID = 0.;
+    double total2ID = 0.;
+
+    /* Pearson correlation for different processes */
+    double pearson_star_sf = 0.;
+    double pearson_star_se = 0.;
+    double pearson_star_bh = 0.;
+    double pearson_sf_se = 0.;
+    double pearson_sf_bh = 0.;
+    double pearson_se_bh = 0.;
+
+    /* Calculate the mean and <x^2> for these processes */
+    double total_sf = 0.;
+    double total_se = 0.;
+    double total_bh = 0.;
+
+    double total2_sf = 0.;
+    double total2_se = 0.;
+    double total2_bh = 0.;
+
+    /* Check that the numbers are uniform over the full-range of useful
+     * time-steps */
+    for (integertime_t ti_current = 0LL; ti_current < max_nr_timesteps;
+         ti_current += increment) {
+
+      ti_current += increment;
+
+      const double r =
+          random_unit_interval(id, ti_current, random_number_star_formation);
+
+      total += r;
+      total2 += r * r;
+      count++;
+
+      /* Calculate for correlation between time.
+       * For this we use the pearson correlation of time i and i-1 */
+      sum_previous_current += r * previous;
+      previous = r;
+
+      /* Calculate if there is a correlation between different ids */
+      const double r_2ndid = random_unit_interval(idoffset, ti_current,
+                                                  random_number_star_formation);
+
+      /* Pearson correlation for small different IDs */
+      pearsonIDs += r * r_2ndid;
+      totalID += r_2ndid;
+      total2ID += r_2ndid * r_2ndid;
+
+      /* Calculate random numbers for the different processes and check
+       * that they are uncorrelated */
+
+      const double r_sf =
+          random_unit_interval(id, ti_current, random_number_stellar_feedback);
+
+      const double r_se = random_unit_interval(
+          id, ti_current, random_number_stellar_enrichment);
+
+      const double r_bh =
+          random_unit_interval(id, ti_current, random_number_BH_feedback);
+
+      /* Calculate the correlation between the different processes */
+      total_sf += r_sf;
+      total_se += r_se;
+      total_bh += r_bh;
+
+      total2_sf += r_sf * r_sf;
+      total2_se += r_se * r_se;
+      total2_bh += r_bh * r_bh;
+
+      pearson_star_sf += r * r_sf;
+      pearson_star_se += r * r_se;
+      pearson_star_bh += r * r_bh;
+      pearson_sf_se += r_sf * r_se;
+      pearson_sf_bh += r_sf * r_bh;
+      pearson_se_bh += r_se * r_bh;
+    }
+
+    const double mean = total / (double)count;
+    const double var = total2 / (double)count - mean * mean;
+
+    /* Pearson correlation calculation for different times */
+    // const double mean_xy = sum_previous_current / ((double)count - 1.f);
+    // const double correlation = (mean_xy - mean * mean) / var;
+    const double correlation =
+        pearsonfunc(mean, mean, sum_previous_current, var, var, count - 1);
+
+    /* Mean for different IDs */
+    const double meanID = totalID / (double)count;
+    const double varID = total2ID / (double)count - meanID * meanID;
+
+    /* Pearson correlation between different IDs*/
+    const double correlationID =
+        pearsonfunc(mean, meanID, pearsonIDs, var, varID, count);
+
+    /* Mean and <x^2> for different processes */
+    const double mean_sf = total_sf / (double)count;
+    const double mean_se = total_se / (double)count;
+    const double mean_bh = total_bh / (double)count;
+
+    const double var_sf = total2_sf / (double)count - mean_sf * mean_sf;
+    const double var_se = total2_se / (double)count - mean_se * mean_se;
+    const double var_bh = total2_bh / (double)count - mean_bh * mean_bh;
+
+    /* Correlation between different processes */
+    const double corr_star_sf =
+        pearsonfunc(mean, mean_sf, pearson_star_sf, var, var_sf, count);
+    const double corr_star_se =
+        pearsonfunc(mean, mean_se, pearson_star_se, var, var_se, count);
+    const double corr_star_bh =
+        pearsonfunc(mean, mean_bh, pearson_star_bh, var, var_bh, count);
+    const double corr_sf_se =
+        pearsonfunc(mean_sf, mean_se, pearson_sf_se, var_sf, var_se, count);
+    const double corr_sf_bh =
+        pearsonfunc(mean_sf, mean_bh, pearson_sf_bh, var_sf, var_bh, count);
+    const double corr_se_bh =
+        pearsonfunc(mean_se, mean_bh, pearson_se_bh, var_se, var_bh, count);
+
+    /* Verify that the mean and variance match the expected values for a uniform
+     * distribution */
+    const double tolmean = 2e-4;
+    const double tolvar = 1e-3;
+    const double tolcorr = 4e-4;
+
+    if ((fabs(mean - 0.5) / 0.5 > tolmean) ||
+        (fabs(var - 1. / 12.) / (1. / 12.) > tolvar) ||
+        (correlation > tolcorr) || (correlationID > tolcorr) ||
+        (fabs(meanID - 0.5) / 0.5 > tolmean) ||
+        (fabs(varID - 1. / 12.) / (1. / 12.) > tolvar) ||
+        (corr_star_sf > tolcorr) || (corr_star_se > tolcorr) ||
+        (corr_star_bh > tolcorr) || (corr_sf_se > tolcorr) ||
+        (corr_sf_bh > tolcorr) || (corr_se_bh > tolcorr) ||
+        (fabs(mean_sf - 0.5) / 0.5 > tolmean) ||
+        (fabs(mean_se - 0.5) / 0.5 > tolmean) ||
+        (fabs(mean_bh - 0.5) / 0.5 > tolmean) ||
+        (fabs(var_sf - 1. / 12.) / (1. / 12.) > tolvar) ||
+        (fabs(var_se - 1. / 12.) / (1. / 12.) > tolvar) ||
+        (fabs(var_bh - 1. / 12.) / (1. / 12.) > tolvar)) {
+      message("Test failed!");
+      message("Global result:");
+      message("Result:    count=%d mean=%f var=%f, correlation=%f", count, mean,
+              var, correlation);
+      message("Expected:  count=%d mean=%f var=%f, correlation=%f", count, 0.5f,
+              1. / 12., 0.);
+      message("ID part");
+      message(
+          "Result:     count%d mean=%f var=%f"
+          " correlation=%f",
+          count, meanID, varID, correlationID);
+      message(
+          "Expected:   count%d mean=%f var=%f"
+          " correlation=%f",
+          count, .5f, 1. / 12., 0.);
+      message("Different physical processes:");
+      message(
+          "Means:    stars=%f stellar feedback=%f stellar "
+          " enrichement=%f black holes=%f",
+          mean, mean_sf, mean_se, mean_bh);
+      message(
+          "Expected: stars=%f stellar feedback=%f stellar "
+          " enrichement=%f black holes=%f",
+          .5f, .5f, .5f, .5f);
+      message(
+          "Var:      stars=%f stellar feedback=%f stellar "
+          " enrichement=%f black holes=%f",
+          var, var_sf, var_se, var_bh);
+      message(
+          "Expected: stars=%f stellar feedback=%f stellar "
+          " enrichement=%f black holes=%f",
+          1. / 12., 1. / 12., 1 / 12., 1. / 12.);
+      message(
+          "Correlation: stars-sf=%f stars-se=%f stars-bh=%f"
+          "sf-se=%f sf-bh=%f se-bh=%f",
+          corr_star_sf, corr_star_se, corr_star_bh, corr_sf_se, corr_sf_bh,
+          corr_se_bh);
+      message(
+          "Expected:    stars-sf=%f stars-se=%f stars-bh=%f"
+          "sf-se=%f sf-bh=%f se-bh=%f",
+          0., 0., 0., 0., 0., 0.);
+      return 1;
+    }
+  }
+
+  return 0;
+}
diff --git a/tests/testReading.c b/tests/testReading.c
index 5e6cee7f1e37f7615eb2c3b4edcaee1d4ebba319..d7d3fcbdae2f3ab744f338bb74e105644a5d88be 100644
--- a/tests/testReading.c
+++ b/tests/testReading.c
@@ -17,6 +17,9 @@
  *
  ******************************************************************************/
 
+/* Some standard headers. */
+#include "../config.h"
+
 /* Some standard headers. */
 #include <stdlib.h>
 
@@ -26,7 +29,6 @@
 int main(int argc, char *argv[]) {
 
   size_t Ngas = 0, Ngpart = 0, Nspart = 0;
-  int periodic = -1;
   int flag_entropy_ICs = -1;
   int i, j, k;
   double dim[3];
@@ -48,8 +50,8 @@ int main(int argc, char *argv[]) {
 
   /* Read data */
   read_ic_single("input.hdf5", &us, dim, &parts, &gparts, &sparts, &Ngas,
-                 &Ngpart, &Nspart, &periodic, &flag_entropy_ICs, 1, 1, 0, 0, 0,
-                 1., 1., 1, 0);
+                 &Ngpart, &Nspart, &flag_entropy_ICs, 1, 1, 0, 0, 0, 1., 1., 1,
+                 0);
 
   /* Check global properties read are correct */
   assert(dim[0] == boxSize);
@@ -57,7 +59,6 @@ int main(int argc, char *argv[]) {
   assert(dim[2] == boxSize);
   assert(Ngas == L * L * L);
   assert(Ngpart == L * L * L);
-  assert(periodic == 1);
 
   /* Check particles */
   for (size_t n = 0; n < Ngas; ++n) {
diff --git a/tests/testRiemannTRRS.c b/tests/testRiemannTRRS.c
index 2c7098367a1ca8db84f097ad01aa2e1e411c433d..e975230c61cd58ad1a077e9b66949044cb7708da 100644
--- a/tests/testRiemannTRRS.c
+++ b/tests/testRiemannTRRS.c
@@ -16,8 +16,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
 
+/* Local headers. */
 #include <string.h>
+
+/* Local includes */
 #include "error.h"
 #include "riemann/riemann_trrs.h"
 #include "tools.h"
diff --git a/tests/testSPHStep.c b/tests/testSPHStep.c
index 63834d94b7696e160dd7ca487ab7e9f1e943abfb..41694872efbfc4d9611127eb1e6324b2b0fa5500 100644
--- a/tests/testSPHStep.c
+++ b/tests/testSPHStep.c
@@ -35,19 +35,19 @@ struct cell *make_cell(size_t N, float cellSize, int offset[3], int id_offset) {
   size_t x, y, z, size;
 
   size = count * sizeof(struct part);
-  if (posix_memalign((void **)&cell->parts, part_align, size) != 0) {
+  if (posix_memalign((void **)&cell->hydro.parts, part_align, size) != 0) {
     error("couldn't allocate particles");
   }
 
   size = count * sizeof(struct xpart);
-  if (posix_memalign((void **)&cell->xparts, xpart_align, size) != 0) {
+  if (posix_memalign((void **)&cell->hydro.xparts, xpart_align, size) != 0) {
     error("couldn't allocate extended particles");
   }
 
   h = 1.2348 * cellSize / N;
 
-  part = cell->parts;
-  xpart = cell->xparts;
+  part = cell->hydro.parts;
+  xpart = cell->hydro.xparts;
   memset(part, 0, count * sizeof(struct part));
   memset(xpart, 0, count * sizeof(struct xpart));
   for (x = 0; x < N; ++x) {
@@ -68,20 +68,20 @@ struct cell *make_cell(size_t N, float cellSize, int offset[3], int id_offset) {
   }
 
   cell->split = 0;
-  cell->h_max = h;
-  cell->count = count;
-  cell->gcount = 0;
-  cell->dx_max_part = 0.;
-  cell->dx_max_sort = 0.;
+  cell->hydro.h_max = h;
+  cell->hydro.count = count;
+  cell->grav.count = 0;
+  cell->hydro.dx_max_part = 0.;
+  cell->hydro.dx_max_sort = 0.;
   cell->width[0] = cellSize;
   cell->width[1] = cellSize;
   cell->width[2] = cellSize;
 
-  cell->ti_hydro_end_min = 1;
-  cell->ti_hydro_end_max = 1;
+  cell->hydro.ti_end_min = 1;
+  cell->hydro.ti_end_max = 1;
 
-  cell->sorted = 0;
-  for (int k = 0; k < 13; k++) cell->sort[k] = NULL;
+  cell->hydro.sorted = 0;
+  for (int k = 0; k < 13; k++) cell->hydro.sort[k] = NULL;
 
   return cell;
 }
@@ -128,9 +128,9 @@ int main(int argc, char *argv[]) {
 
   /* Set particle properties */
   for (j = 0; j < 27; ++j)
-    for (i = 0; i < cells[j]->count; ++i) {
-      cells[j]->parts[i].mass = dim * dim * dim * rho / (N * N * N);
-      cells[j]->parts[i].u = P / (hydro_gamma_minus_one * rho);
+    for (i = 0; i < cells[j]->hydro.count; ++i) {
+      cells[j]->hydro.parts[i].mass = dim * dim * dim * rho / (N * N * N);
+      cells[j]->hydro.parts[i].u = P / (hydro_gamma_minus_one * rho);
     }
 
   message("m=%f", dim * dim * dim * rho / (N * N * N));
@@ -171,7 +171,7 @@ int main(int argc, char *argv[]) {
   e.ti_current = 1;
 
   /* The tracked particle */
-  p = &(ci->parts[N * N * N / 2 + N * N / 2 + N / 2]);
+  p = &(ci->hydro.parts[N * N * N / 2 + N * N / 2 + N / 2]);
 
   message("Studying particle p->id=%lld", p->id);
 
@@ -209,10 +209,10 @@ int main(int argc, char *argv[]) {
   message("ti_end=%d", p->ti_end);
 
   for (int j = 0; j < 27; ++j) {
-    free(cells[j]->parts);
-    free(cells[j]->xparts);
+    free(cells[j]->hydro.parts);
+    free(cells[j]->hydro.xparts);
     for (int k = 0; k < 13; k++)
-      if (cells[j]->sort[k] != NULL) free(cells[j]->sort[k]);
+      if (cells[j]->hydro.sort[k] != NULL) free(cells[j]->hydro.sort[k]);
     free(cells[j]);
   }
 
diff --git a/tests/testSelectOutput.c b/tests/testSelectOutput.c
index 0b0adfa4e5a96f3431b27052bbb079f9be8838f2..5f3db8a4598d2dd0adab086a08c1f6208bd9b130 100644
--- a/tests/testSelectOutput.c
+++ b/tests/testSelectOutput.c
@@ -18,7 +18,7 @@
  ******************************************************************************/
 
 /* Some standard headers. */
-#include <stdlib.h>
+#include "../config.h"
 
 /* Includes. */
 #include "swift.h"
@@ -44,7 +44,6 @@ void select_output_engine_init(struct engine *e, struct space *s,
   e->time = 0;
   e->snapshot_output_count = 0;
   e->snapshot_compression = 0;
-  e->snapshot_label_delta = 1;
 };
 
 void select_output_space_init(struct space *s, double *dim, int periodic,
@@ -86,8 +85,8 @@ int main(int argc, char *argv[]) {
 
   char *base_name = "testSelectOutput";
   size_t Ngas = 0, Ngpart = 0, Nspart = 0;
-  int periodic = -1;
   int flag_entropy_ICs = -1;
+  int periodic = 1;
   double dim[3];
   struct part *parts = NULL;
   struct gpart *gparts = NULL;
@@ -112,8 +111,8 @@ int main(int argc, char *argv[]) {
   /* Read data */
   message("Reading initial conditions.");
   read_ic_single("input.hdf5", &us, dim, &parts, &gparts, &sparts, &Ngas,
-                 &Ngpart, &Nspart, &periodic, &flag_entropy_ICs, 1, 0, 0, 0, 0,
-                 1., 1., 1, 0);
+                 &Ngpart, &Nspart, &flag_entropy_ICs, 1, 0, 0, 0, 0, 1., 1., 1,
+                 0);
 
   /* pseudo initialization of the space */
   message("Initialization of the space.");
diff --git a/tests/testSymmetry.c b/tests/testSymmetry.c
index 886290ab984603d0afb3201377611598cd7163e4..1f0849bb9093948fa68d88984c285c44b403ba79 100644
--- a/tests/testSymmetry.c
+++ b/tests/testSymmetry.c
@@ -27,7 +27,10 @@
 
 void print_bytes(void *p, size_t len) {
   printf("(");
-  for (size_t i = 0; i < len; ++i) printf("%02x", ((unsigned char *)p)[i]);
+  for (size_t i = 0; i < len; ++i) {
+    printf("%02x", ((unsigned char *)p)[i]);
+    if (i % 4 == 3) printf("|");
+  }
   printf(")\n");
 }
 
@@ -162,8 +165,8 @@ void test(void) {
   if (i_not_ok) {
     printParticle_single(&pi, &xpi);
     printParticle_single(&pi2, &xpi);
-    print_bytes(&pj, sizeof(struct part));
-    print_bytes(&pj2, sizeof(struct part));
+    print_bytes(&pi, sizeof(struct part));
+    print_bytes(&pi2, sizeof(struct part));
     error("Particles 'pi' do not match after density (byte = %d)", i_not_ok);
   }
   if (j_not_ok) {
@@ -174,6 +177,38 @@ void test(void) {
     error("Particles 'pj' do not match after density (byte = %d)", j_not_ok);
   }
 
+    /* --- Test the gradient loop --- */
+#ifdef EXTRA_HYDRO_LOOP
+
+  /* Call the symmetric version */
+  runner_iact_gradient(r2, dx, pi.h, pj.h, &pi, &pj, a, H);
+
+  /* Call the non-symmetric version */
+  runner_iact_nonsym_gradient(r2, dx, pi2.h, pj2.h, &pi2, &pj2, a, H);
+  dx[0] = -dx[0];
+  dx[1] = -dx[1];
+  dx[2] = -dx[2];
+  runner_iact_nonsym_gradient(r2, dx, pj2.h, pi2.h, &pj2, &pi2, a, H);
+
+  i_not_ok = memcmp((char *)&pi, (char *)&pi2, sizeof(struct part));
+  j_not_ok = memcmp((char *)&pj, (char *)&pj2, sizeof(struct part));
+
+  if (i_not_ok) {
+    printParticle_single(&pi, &xpi);
+    printParticle_single(&pi2, &xpi);
+    print_bytes(&pi, sizeof(struct part));
+    print_bytes(&pi2, sizeof(struct part));
+    error("Particles 'pi' do not match after gradient (byte = %d)", i_not_ok);
+  }
+  if (j_not_ok) {
+    printParticle_single(&pj, &xpj);
+    printParticle_single(&pj2, &xpj);
+    print_bytes(&pj, sizeof(struct part));
+    print_bytes(&pj2, sizeof(struct part));
+    error("Particles 'pj' do not match after gradient (byte = %d)", j_not_ok);
+  }
+#endif
+
   /* --- Test the force loop --- */
 
   /* Call the symmetric version */
@@ -220,17 +255,15 @@ void test(void) {
     j_not_ok |= c_is_d;
   }
 #else
-  i_not_ok =
-      strncmp((const char *)&pi, (const char *)&pi2, sizeof(struct part));
-  j_not_ok =
-      strncmp((const char *)&pj, (const char *)&pj2, sizeof(struct part));
+  i_not_ok = memcmp((char *)&pi, (char *)&pi2, sizeof(struct part));
+  j_not_ok = memcmp((char *)&pj, (char *)&pj2, sizeof(struct part));
 #endif
 
   if (i_not_ok) {
     printParticle_single(&pi, &xpi);
     printParticle_single(&pi2, &xpi);
-    print_bytes(&pj, sizeof(struct part));
-    print_bytes(&pj2, sizeof(struct part));
+    print_bytes(&pi, sizeof(struct part));
+    print_bytes(&pi2, sizeof(struct part));
     error("Particles 'pi' do not match after force (byte = %d)", i_not_ok);
   }
   if (j_not_ok) {
diff --git a/tests/testTimeIntegration.c b/tests/testTimeIntegration.c
index 2034c402a2d626a7b503613f6cade821ec438151..b7f5201356ee52419038c8379dde14c9bab82055 100644
--- a/tests/testTimeIntegration.c
+++ b/tests/testTimeIntegration.c
@@ -83,9 +83,9 @@ int main(int argc, char *argv[]) {
   xparts[0].v_full[2] = 0.;
 
   /* Set the particle in the cell */
-  c.parts = parts;
-  c.xparts = xparts;
-  c.count = 1;
+  c.hydro.parts = parts;
+  c.hydro.xparts = xparts;
+  c.hydro.count = 1;
   c.split = 0;
 
   /* Create an engine and a fake runner */
@@ -108,11 +108,13 @@ int main(int argc, char *argv[]) {
     eng.time += dt;
 
     /* Compute gravitational acceleration */
-    float r2 =
-        c.parts[0].x[0] * c.parts[0].x[0] + c.parts[0].x[1] * c.parts[0].x[1];
+    float r2 = c.hydro.parts[0].x[0] * c.hydro.parts[0].x[0] +
+               c.hydro.parts[0].x[1] * c.hydro.parts[0].x[1];
     float r = sqrtf(r2);
-    c.parts[0].a_hydro[0] = -(G * M_sun * c.parts[0].x[0] / r * r * r);
-    c.parts[0].a_hydro[1] = -(G * M_sun * c.parts[0].x[1] / r * r * r);
+    c.hydro.parts[0].a_hydro[0] =
+        -(G * M_sun * c.hydro.parts[0].x[0] / r * r * r);
+    c.hydro.parts[0].a_hydro[1] =
+        -(G * M_sun * c.hydro.parts[0].x[1] / r * r * r);
 
     /* Kick... */
     runner_do_kick2(&run, &c, 0);
diff --git a/tests/testVoronoi2D.c b/tests/testVoronoi2D.c
index 60a71624904c11a3cdb3b90906189df60bfc6956..5057278efaa3ba0e1ccec2ba6b032cd12b029ff9 100644
--- a/tests/testVoronoi2D.c
+++ b/tests/testVoronoi2D.c
@@ -16,6 +16,9 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
+
+/* Local headers. */
 #include "hydro/Shadowswift/voronoi2d_algorithm.h"
 #include "tools.h"
 
diff --git a/tests/testVoronoi3D.c b/tests/testVoronoi3D.c
index db5c33aa6e4ef0792373febd5d773a6d1198db29..5e0288fa9b3e13e0c6a6fb13db202e0f73f29a5b 100644
--- a/tests/testVoronoi3D.c
+++ b/tests/testVoronoi3D.c
@@ -16,8 +16,12 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  *
  ******************************************************************************/
+#include "../config.h"
 
+/* Some standard headers. */
 #include <stdlib.h>
+
+/* Local headers. */
 #include "error.h"
 #include "hydro/Shadowswift/voronoi3d_algorithm.h"
 #include "part.h"
diff --git a/tests/tolerance_125_perturbed.dat b/tests/tolerance_125_perturbed.dat
index 9987f8a0703a6106f41b73c1a16b4cea8af3bc1e..95f5f78246a82b7c326c87f9b4edbac4f51c65e9 100644
--- a/tests/tolerance_125_perturbed.dat
+++ b/tests/tolerance_125_perturbed.dat
@@ -1,4 +1,4 @@
 #   ID    pos_x    pos_y    pos_z      v_x      v_y      v_z        h      rho    div_v        S        u        P        c      a_x      a_y      a_z     h_dt    v_sig    dS/dt    du/dt
     0	  1e-4	   1e-4	    1e-4       1e-4	1e-4	 1e-4	    1e-4   1e-4	  1e-4	       1e-4	1e-4	 1e-4	  1e-4	 1e-4	  1e-4	   1e-4	   1e-4	   1e-4	    1e-4     1e-4
-    0	  1e-4	   1e-4	    1e-4       1e-4	1e-4	 1e-4	    1e-4   1e-4	  1e-4	       1e-4	1e-4	 1e-4	  1e-4	 2.3e-3	  2e-3	   2e-3	   1e-4	   1e-4	    1e-4     1e-4
+    0	  1e-4	   1e-4	    1e-4       1e-4	1e-4	 1e-4	    1e-4   1e-4	  1e-4	       1e-4	1e-4	 1e-4	  1e-4	 3.6e-3	  2e-3	   2e-3	   1e-4	   1e-4	    1e-4     1e-4
     0	  1e-6	   1e-6	    1e-6       1e-6	1e-6	 1e-6	    1e-6   1e-6	  1e-6	       1e-6	1e-6	 1e-6	  1e-6	 2e-4	  2e-4	   2e-4	   1e-6	   1e-6	    1e-6     1e-6
diff --git a/theory/Cooling/bibliography.bib b/theory/Cooling/bibliography.bib
new file mode 100644
index 0000000000000000000000000000000000000000..c0277fed06c19dbc428978517afc395d7e57d474
--- /dev/null
+++ b/theory/Cooling/bibliography.bib
@@ -0,0 +1,15 @@
+@ARTICLE{Wiersma2009,
+   author = {{Wiersma}, R.~P.~C. and {Schaye}, J. and {Smith}, B.~D.},
+    title = "{The effect of photoionization on the cooling rates of enriched, astrophysical plasmas}",
+  journal = {\mnras},
+archivePrefix = "arXiv",
+   eprint = {0807.3748},
+ keywords = {atomic processes , plasmas , cooling flows , galaxies: formation , intergalactic medium},
+     year = 2009,
+    month = feb,
+   volume = 393,
+    pages = {99-107},
+      doi = {10.1111/j.1365-2966.2008.14191.x},
+   adsurl = {http://adsabs.harvard.edu/abs/2009MNRAS.393...99W},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
diff --git a/theory/Cooling/eagle_cooling.tex b/theory/Cooling/eagle_cooling.tex
new file mode 100644
index 0000000000000000000000000000000000000000..db01f65f4cd4a48a66c62e640b9c0165626f4bdf
--- /dev/null
+++ b/theory/Cooling/eagle_cooling.tex
@@ -0,0 +1,358 @@
+\documentclass[fleqn, usenatbib, useAMS, a4paper]{mnras}
+\usepackage{graphicx}
+\usepackage{amsmath,paralist,xcolor,xspace,amssymb}
+\usepackage{times}
+\usepackage{comment}
+\usepackage[super]{nth}
+
+\newcommand{\todo}[1]{{\textcolor{red}{TODO: #1}\\}}
+\newcommand{\swift}{{\sc Swift}\xspace}
+
+\newcommand{\D}[2]{\frac{d#1}{d#2}}
+\newcommand{\LL}{\left(}
+\newcommand{\RR}{\right)}
+
+\title{Integration scheme for cooling}
+\author{Alexei Borissov, Matthieu Schaller}
+
+\begin{document}
+
+\maketitle
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Basic principles}
+
+\subsection{Isochoric cooling}
+
+\todo{MATTHIEU: Discuss the fact that we want to do cooling at constant
+  density.}
+
+\subsection{Time integration}
+
+We want to compute the change in internal energy of a given particle
+due to the interaction of the gas with the background radiation. More
+specifically we want to integrate the following equation:
+\begin{equation}
+  u_{\rm final} \equiv u(t+\Delta t) = u(t) + \left(\frac{\partial u}{\partial t}\bigg|_{\rm
+    hydro} + \frac{\partial u}{\partial t}\bigg|_{\rm cooling}\right)
+  \times \Delta t.
+\end{equation}
+The first derivative is given by the SPH scheme, the second one is
+what we are after here. We start by computing the internal energy the
+particle would have in the absence of cooling:
+\begin{equation}
+  u_0 \equiv u(t) + \frac{\partial u}{\partial t}\bigg|_{\rm
+    hydro} \times \Delta t.
+\end{equation}
+We then proceed to solve the implicit equation
+\begin{equation}\label{implicit-eq}
+ u_{\rm final} = u_0 + \lambda(u_{\rm final}) \Delta t,
+\end{equation}
+where $\lambda$ is the cooling rate\footnote{Note this is not the
+  physical cooling rate $\Lambda/n_{\rm H}^2$ that is commonly
+  used. This is the change in energy over time $\frac{\partial
+    u}{\partial t}\big|_{\rm cool}$ from all the channels
+  including all the multiplicative factors coming in front of the
+  physical $\Lambda$.}, which for a given particle varies
+only with respect to $u$ throughout the duration of the timestep. The
+other dependencies of $\lambda$ (density, metallicity and redshift)
+are kept constant over the course of $\Delta t$. Crucially, we want to
+evaluate $\lambda$ at the end of the time-step. Once a solution to this
+implicit problem has been found, we get the total cooling rate:
+\begin{equation}
+  \frac{\partial u}{\partial t}\bigg|_{\rm total} \equiv \frac{u_{\rm final} -
+    u(t)}{\Delta t},
+\end{equation}
+leading to the following total equation of motion for internal energy:
+\begin{equation}
+  u(t+\Delta t) = u(t) + \frac{\partial u}{\partial t}\bigg|_{\rm
+    total} \times \Delta t.
+\end{equation}
+The time integration is then performed in the regular time integration
+section of the code. Note that, as expected, if $\lambda=0$ the whole
+processes reduces to a normal hydro-dynamics only time integration of
+the internal energy.
+
+Finally, for schemes evolving entropy $A$ instead of internal energy
+$u$ (or for that matter any other thermodynamic quantity), we convert
+the entropy derivative coming from the hydro scheme to an internal
+energy derivative, solve the implicit cooling problem using internal
+energies and convert the total time derivative back to an entropy
+derivative. Since we already assume that cooling is performed at
+constant density, there is no loss in accuracy happening via this
+conversion procedure.
+
+\subsubsection{Energy floor and prediction step}
+
+In most applications, the cooling is not allowed to bring the internal
+energy below a certain value $u_{\rm min}$, usually expressed in the
+form of a minimal temperature. Additionally, and even in the absence
+of such a temperature floor, we must ensure that the energy does not
+become negative.
+
+Since the time-step size is not chosen in a way to fulfil these
+criteria, we have to limit the total rate of change of energy such
+that the limits are not reached. In practice this means modifying
+$\frac{\partial u}{\partial t}\big|_{\rm total}$ such that
+\begin{equation}
+  u(t) + \frac{\partial u}{\partial t}\bigg|_{\rm total} \times \Delta t \geq
+  u_{\rm min}
+\end{equation}
+is true. In the vast majority of cases, there is no need to modify the
+energy derivative but this may be necessary for some rapidly cooling
+particles.
+
+The time integration uses a leapfrog algorithm in its
+``Kick-Drift-Kick'' form. In the cases, where the time-step is
+constant, the condition as written above would be sufficient, however
+with variable $\Delta t$ this needs modifying. If the next time-step
+of a particle decreases in size, then the condition above will remain
+true. However, if the time-step size increases then we may violate the
+condition and integrate the energy to a value below $u_{\rm min}$. The
+time-step size is chosen in-between the application of the two kick
+operators\footnote{Effectively creating the chain
+  ``Kick-Drift-Kick-Timestep'', where the last operation fixes the
+  time-step size for the next kick-drift-kick cycle.}. We hence have
+to ensure that the integration of one half of the current step (the
+second kick) and one half of the next step (the first kick) does not
+lead to a value below the allowed minimum. In \swift, we do not allow
+the time-step to increase by more than a factor of $2$. This implies
+that we will at most integrate the internal energy forward in time for
+$1.5\Delta t$, where $\Delta t$ is the current value of the time-step
+size we used in all the equations thus far. An additional subtlety
+does, however, enter the scheme. The internal energy is not just used
+in the Kick operator. Because of the needs of the SPH scheme, the
+particles have to carry an estimate of the entropy at various points
+in time inside the step of this particle. This is especially important
+for inactive particles that act as sources for neighbouring active
+particles. We must hence not only protect for the next half-kick, we
+must also ensure that the value we estimated will be valid over the
+next drift step as well. This means completing the current half-kick
+and the next full drift, which could in principle double in size; this
+implies checking the limits over the next $2.5\Delta t$. However, for
+that variable, since it is an extrapolation and not the actual
+variable we integrate forward in time, we do not need to enforce the
+$u_{\rm min}$ limit. We must only ensure that the energy remains
+positive. Combining those two conditions, we conclude that we must
+enforce two limits:
+\begin{equation}
+  \left\lbrace
+  \begin{array}{ll}
+  \displaystyle\frac{\partial u}{\partial t}\bigg|_{\rm total}  \geq
+  -\displaystyle\frac{u(t) - u_{\rm min} }{1.5 \Delta t}, \Bigg.  \\
+  \displaystyle\frac{\partial u}{\partial t}\bigg|_{\rm total}  \geq
+  -\displaystyle\frac{u(t) - u_{\rm min} }{(2.5 + \epsilon) \Delta t},  
+  \end{array}
+  \right.
+\end{equation}
+where in the second equation we added a small value $\epsilon$ to
+ensure that we will not get negative values because of rounding errors.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\section{Solution to the implicit cooling problem}
+
+In this section we describe the integration scheme used to compute the 
+cooling rate. It consists of an explicit solver for cases where the 
+cooling rate is small, a solver based on the Newton-Raphson method, and
+one based on the bisection method. 
+
+\subsection{Explicit solver}
+
+For many particles the cooling occurring over a timestep will be small 
+(for example, if a particle is at the equilibrium temperature and was not
+heated by other means such as shock heating). In these cases $\lambda(u_0)
+\simeq \lambda(u_{final})$, so an explicit solution to compute $u_{final}$
+may be used as a faster alternative to a more complicated implicit scheme. 
+More specifically, if $\lambda(u_0) dt < \varepsilon u_0$ we set
+\begin{equation}
+u_{final} = u_0 + \lambda(u_0) dt,
+\end{equation}
+where $\varepsilon$ is a small constant, set to $0.05$ to be consistent 
+with the EAGLE simulations. 
+
+In cases where $\lambda(u_0) dt > \varepsilon u_0$ one of two implicit 
+methods are used, either the Newton-Raphson method, which benefits from
+faster convergence, however is not guaranteed to converge, or the 
+bisection method, which is slower but always converges. 
+
+\subsection{Newton-Raphson method}
+
+Equation \ref{implicit-eq} may be rearranged so that we are trying to
+find the root of 
+\begin{equation}\label{fu-eq}
+f(u_{final}) = u_{final} - u_0 - \lambda(u_{final}) dt = 0.
+\end{equation}
+This may be done iteratively using the Newton-Raphson method obtaining
+consecutive approximations to $u_{final}$ by 
+\begin{equation}
+u_{n+1} = u_n - \frac{f(u_n)}{df(u_n)/du}.
+\end{equation}
+In some cases a negative value of $u_{n+1}$ may be calculated. To 
+prevent the occurrence of negative internal energies during the
+calculation we introduce $x = \log (u_{final})$, so that we solve
+\begin{equation}\label{fx-eq}
+f(x) = e^x - u_0 - \lambda(e^x) dt = 0
+\end{equation}
+instead of \ref{fu-eq}. Thus we obtain consecutive approximations of 
+the root of $f$ by the formula $x_{n+1} = x_n - f(x_n)/f'(x_n)$. This 
+leads to
+\begin{equation}
+x_{n+1} = x_n - \frac{1 - u_0 e^{-x_n} -\lambda(e^{x_n})e^{-x_n}dt}{1
+  - \frac{d\lambda}{du}(e^{x_n}) dt}.
+\end{equation}
+
+The tables used for EAGLE cooling in fact depend on temperature rather 
+than internal energy and include a separate table to convert from
+internal energy to temperature. Hence, to obtain the gradient we use
+\begin{align*}
+  \D \lambda u &= \D \lambda T \D T u \\
+               &= \frac{\lambda(T_{high,n})
+    - \lambda(T_{low,n})}{T_{high,n} - T_{low,n}}
+                  \frac{T(u_{high,n})
+    - T(u_{low,n})}{u_{high,n} - u_{low,n}},
+\end{align*}
+where $T_{\rm high,n}, u_{\rm high,n}$ and $T_{\rm low,n}, u_{\rm low,n}$ 
+are values of the temperature and internal energy grid bracketing the current 
+temperature and internal energy for the iteration in Newton's method 
+(e.g. $u_{high,n} \ge u_n \ge u_{low,n}$).
+
+The initial guess for the Newton-Raphson method is taken to be $x_0 = \log(u_0)$.
+If in the first iteration the sign of $\lambda$ changes, the next
+guess is taken to correspond to the equilibrium temperature (i.e. $10^4$K). 
+
+A particle is considered to have converged if the relative error in
+the internal energy is sufficiently small. This can be formulated as
+\begin{align*}
+\frac{u_{n+1} - u_n}{u_{n+1}} &< C \\
+u_{n+1} - u_n &< Cu_{n+1} \\
+\LL 1-C\RR u_{n+1} &< u_n \\
+\frac{u_{n+1}}{u_n} &< \frac{1}{1-C} \\
+x_{n+1} - x_n = \log\frac{u_{n+1}}{u_n} &< -\log\LL 1-C \RR \simeq C.
+\end{align*}
+Since the grid spacing in the internal energy of the Eagle tables is
+0.045 in $\log_{10}u$ we take $C = 10^{-2}$.
+
+In cases when the Newton-Raphson method doesn't converge within a specified
+number of iterations we revert to the bisection method. In order to use
+the Newton-Raphson method a parameter (EagleCooling:newton\_integration) in 
+the yaml file needs to be set to 1.
+
+\subsection{Bisection method}
+
+In order to guarantee convergence the bisection method is used to solve 
+equation \ref{fu-eq}. The implementation is the same as in the EAGLE 
+simulations, but is described here for convenience. 
+
+First a small interval is used to bracket the solution. The interval bounds
+are defined as $u_{upper} = \kappa u_0$ and $u_{lower} = \kappa^{-1} u_0$, 
+with $\kappa = \sqrt{1.1}$ as specified in EAGLE. If the particle is cooling
+($\lambda(u_0) < 0$) $u_{upper}$ and $u_{lower}$ are iteratively decreased
+by factors of $\kappa$ until $f(u_{lower}) < 0$. Alternatively, if the 
+particle is initially heating ($\lambda(u_0) > 0$) the bounds are iteratively
+increased by factors of $\kappa$ until $f(u_{upper}) > 0$. Once the bounds
+are obtained, the bisection scheme is performed as normal. 
+
+\section{EAGLE cooling tables}
+
+We use the same cooling tables as used in EAGLE, specifically those found in 
+\cite{Wiersma2009} and may be found at http://www.strw.leidenuniv.nl/WSS08/. 
+These tables contain pre-computed values of the cooling rate for a given 
+redshift, metallicity, hydrogen number density and temperature produced using 
+the package CLOUDY. When calculating the cooling rate for particles at 
+redshifts higher than the redshift of reionisation the tables used do not 
+depend on redshift, but only on metallicity, hydrogen number density and 
+temperature. These tables are linearly interpolated based on the
+particle properties. 
+
+Since these tables specify the cooling rate in terms of temperature, the internal
+energy of a particle needs to be converted to a temperature in a way which takes 
+into account the ionisation state of the gas. This is done by interpolating a 
+pre-computed table of values of temperature depending on redshift, hydrogen number
+density, helium fraction and internal energy (again, for redshifts higher than the 
+redshift of reionisation this table does not depend on redshift). 
+
+Inverse Compton cooling is not accounted for in the high redshift tables, so prior 
+to reionisation it is taken care of by an analytical formula,
+\begin{equation}
+\frac{\Lambda_{compton}}{n_h^2} = -\Lambda_{0,compton} \left( T - T_{CMB}(1+z) 
+\right) (1+z)^4 \frac{n_e}{n_h},
+\end{equation}
+which is added to the cooling rate interpolated from the tables. Here $n_h$ is the
+hydrogen number density, $T$ the temperature of the particle, $T_{CMB} = 2.7255$K 
+the temperature of the CMB, $z$ the redshift, $n_e$ the hydrogen and helium electron
+number density, and $\Lambda_{0,compton} = 1.0178085 \times 10^{-37} g \cdot cm^2 
+\cdot s^{-3} \cdot K^{-5}$. 
+
+\section{Co-moving time integration}
+
+In the case of cosmological simulations, the equations need to be
+slightly modified to take into account the expansion of the
+Universe. The code uses the comoving internal energy $u' =
+a(t)^{3(\gamma-1)}u$ or comoving entropy $A'=A$ as thermodynamic
+variable. The equation of motion for the variable are then modified
+and take the following form:
+\begin{equation}
+  \frac{\partial u'_i}{\partial t} = \frac{\partial u'_i}{\partial
+    t}\bigg|_{\rm hydro}  = \frac{1}{a(t)^2} Y'_i(t)\big|_{\rm
+    hydro},
+\end{equation}
+where $Y_i$ is computed from the particle itself and its neighbours
+and corresponds to the change in internal energy due to hydrodynamic
+forces. We then integrate the internal energy forward in time using
+\begin{equation}
+  u'_i(t+\Delta t) = u'_i(t) + Y'_i(t)\big|_{\rm hydro} \times \underbrace{\int_t^{t+\Delta t}
+  \frac{1}{a(t)^2} dt}_{\Delta t_{\rm therm}}.
+\end{equation}
+The exact same equations apply in the case of a hydrodynamics scheme
+evolving entropy (see cosmology document). We note that this is
+different from the choice made in Gadget where there is no $a^{-2}$
+term as it is absorbed in the definition in $Y'_i$ itself. As a
+consequence $\Delta t_{\rm therm}$ is just $\Delta t$.
+
+In order to compute the
+cooling rate of a particle, we convert quantities to physical
+coordinates. Given the appearance of scale-factors in some of these
+equations, we have to be careful to remain consistent throughout. We
+start by constructing the co-moving internal energy at the end of the
+time-step in the absence of cooling:
+\begin{equation}
+  u'_0 \equiv u'(t) + Y'_i(t)\big|_{\rm hydro} \times \Delta t_{\rm therm},
+\end{equation}
+which we then convert into a physical internal energy alongside the
+thermal energy at the current time:
+\begin{align}
+  u(t) &= a^{3(1-\gamma)}u'(t),\\
+  u_0 &= a^{3(1-\gamma)}u'_0.
+\end{align}
+We can then solve the implicit cooling problem in the same way as in
+the non-comoving case and obtain
+\begin{equation}
+  u_{\rm final} = u_0 + \lambda(u_{\rm final}) \Delta t.
+\end{equation}
+We note that the $\Delta t$ here is the actual time between the start
+and end of the step; unlike $\Delta t_{\rm therm}$ there are no
+scale-factors entering that term. The solution to the implicit problem
+in physical coordinates yields the definition of the total time
+derivative of internal energy:
+\begin{equation}
+  \frac{\partial u}{\partial t}\bigg|_{\rm total} \equiv \frac{u_{\rm final} -
+    u(t)}{\Delta t}.
+\end{equation}
+This allows us to construct the total evolution of co-moving energy:
+\begin{equation}
+  Y'_i(t)\big|_{\rm total} = a^{3(\gamma-1)} \times \frac{\Delta t}{\Delta
+    t_{\rm therm}} \times
+  \frac{\partial u}{\partial t}\bigg|_{\rm total},
+\end{equation}
+where the first term is the conversion from physical to co-moving
+internal energy and the second term is required by our definition of
+our time integration operator. The time integration routine then performs the
+same calculation as in the non-cooling case:
+\begin{equation}
+  u'_i(t+\Delta t) = u'_i(t) + Y'_i(t)\big|_{\rm total} \times {\Delta t_{\rm therm}}.
+\end{equation}
+
+\bibliographystyle{mnras}
+\bibliography{./bibliography.bib}
+
+\end{document}
diff --git a/theory/Cooling/run.sh b/theory/Cooling/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..17ae407ba66b36b5f192f2b97f7d216a17af26a0
--- /dev/null
+++ b/theory/Cooling/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "Generating PDF..."
+pdflatex -jobname=eagle_cooling eagle_cooling.tex
+bibtex eagle_cooling.aux
+pdflatex -jobname=eagle_cooling eagle_cooling.tex
+pdflatex -jobname=eagle_cooling eagle_cooling.tex
diff --git a/theory/Cosmology/artificialvisc.tex b/theory/Cosmology/artificialvisc.tex
new file mode 100644
index 0000000000000000000000000000000000000000..55cbe2756714f875c2a9c52d7afae49499b4328b
--- /dev/null
+++ b/theory/Cosmology/artificialvisc.tex
@@ -0,0 +1,75 @@
+\subsection{Cosmological factors for properties entering the artificial viscosity}
+\label{ssec:artificialvisc}
+
+There are multiple properties that enter into the more complex artificial
+viscosity schemes, such as those by \citet{Morris1997} (henceforth M\&M) and
+\citet{Cullen2010} (henceforth C\&D).
+
+\subsubsection{M\&M basic scheme}
+\label{sssec:mandm}
+
+This relies on the velocity divergence as a shock indicator, i.e. the property
+$\nabla \cdot \mathbf{v}$. The interpretation of this is the velocity divergence of
+the fluid overall, i.e. the physical velocity divergence. Starting with
+\begin{equation}
+\mathbf{v}_p = a \dot{\mathbf{r}}' + \dot{a} \mathbf{r}', \nonumber
+\end{equation}
+with the divergence,
+\begin{equation}
+\nabla \cdot \mathbf{v}_p =
+    \nabla \cdot \left(a \dot{\mathbf{r}}'\right) +
+    \nabla \cdot \left(\dot{a} \mathbf{r}'\right). \nonumber
+\end{equation}
+The quantity on the left is the one that we want to enter the source term for the
+artificial viscosity. Transforming to the co-moving derivative on the right hand side
+to enable it to be calculated in the code,
+\begin{equation}
+\nabla \cdot \mathbf{v}_p = 
+    \nabla' \cdot \dot{\mathbf{r}}' + n_d H(a),
+\label{eqn:divvwithcomovingcoordinates}
+\end{equation}
+with $n_d$ the number of spatial dimensions, and the final transformation
+being the one to internal code velocity units,
+\begin{equation}
+\nabla \cdot \mathbf{v}_p = 
+    \frac{1}{a^2} \nabla' \cdot \mathbf{v}' + n_d H(a).
+\label{eqn:divvcodeunits}
+\end{equation}
+We note that there is no similar Hubble flow term in the expression for
+$\nabla \times \mathbf{v}_p$.
+
+In some more complex schemes, such as the one presented by \cite{Cullen2010},
+the time differential of the velocity divergence is used as a way to differentiate
+the pre- and post-shock region.
+
+Building on the above, we take the time differential of both sides,
+\begin{equation}
+    \frac{{\mathrm d}}{{\mathrm d} t} \nabla \cdot \mathbf{v}_p = 
+    \frac{{\mathrm d}}{{\mathrm d} t} \left(
+    	\frac{1}{a^2} \nabla' \cdot \mathbf{v}' + n_d H(a)
+    \right).
+    \nonumber
+\end{equation}
+Collecting the factors, we see
+\begin{align}
+    \frac{{\mathrm d}}{{\mathrm d} t} \nabla \cdot \mathbf{v}_p = 
+    \frac{1}{a^2} &\left(
+    	\frac{{\mathrm d}}{{\mathrm d} t} \nabla ' \cdot \mathbf{v}' -
+    	2H(a) \nabla' \cdot \mathbf{v}'
+    \right) \\
+    + n_d &\left(
+    	\frac{\ddot{a}}{a} - \frac{\dot{a}^2}{a^2}
+    \right).
+    \label{eqn:divvdtcodeunits}
+\end{align}
+This looks like quite a mess, but in most cases we calculate this implicitly
+from the velocity divergence itself, and so we do not actually need to take
+into account these factors; i.e. we actually calculate
+\begin{equation}
+    \frac{\mathrm d}{{\mathrm d} t} \nabla \cdot \mathbf{v}_p = 
+    \frac{
+    	\nabla \cdot \mathbf{v}_p (t + {\mathrm d}t) - \nabla \cdot \mathbf{v}_p (t)
+    }{dt},
+	\label{eqn:divvdtcodeunitsimplicit}
+\end{equation}
+meaning that the above is taken into account self-consistently.
\ No newline at end of file
diff --git a/theory/Cosmology/bibliography.bib b/theory/Cosmology/bibliography.bib
index 6979bf7dd23bdb8543ac8752c12432837480d4ed..84cec263d2e8195bc831e672184b41d61479fcc2 100644
--- a/theory/Cosmology/bibliography.bib
+++ b/theory/Cosmology/bibliography.bib
@@ -137,4 +137,42 @@ issn = "0021-9991",
 doi = "https://doi.org/10.1006/jcph.1997.5732",
 url = "http://www.sciencedirect.com/science/article/pii/S0021999197957326",
 author = "J.J. Monaghan"
-}
\ No newline at end of file
+}
+@article{Cullen2010,
+author = {Cullen, Lee and Dehnen, Walter},
+title = {{Inviscid smoothed particle hydrodynamics}},
+journal = {Monthly Notices of the Royal Astronomical Society},
+year = {2010},
+volume = {408},
+number = {2},
+pages = {669--683},
+month = oct,
+annote = {14 pages (15 in arXiv), 15 figures, accepted for publication in MNRAS}
+}
+
+@article{Morris1997,
+author = {Morris, J P and Monaghan, J J},
+title = {{A Switch to Reduce SPH Viscosity}},
+journal = {Journal of Computational Physics},
+year = {1997},
+volume = {136},
+number = {1},
+pages = {41--50},
+month = sep
+}
+
+@ARTICLE{Springel2010,
+   author = {{Springel}, V.},
+    title = "{E pur si muove: Galilean-invariant cosmological hydrodynamical simulations on a moving mesh}",
+  journal = {\mnras},
+archivePrefix = "arXiv",
+   eprint = {0901.4107},
+ keywords = {methods: numerical, galaxies: interactions, cosmology: dark matter},
+     year = 2010,
+    month = jan,
+   volume = 401,
+    pages = {791-851},
+      doi = {10.1111/j.1365-2966.2009.15715.x},
+   adsurl = {http://adsabs.harvard.edu/abs/2010MNRAS.401..791S},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
diff --git a/theory/Cosmology/coordinates.tex b/theory/Cosmology/coordinates.tex
index 38a571aefea68fbe1bc7a8ebc3867109f1c4736e..a1dbff71c13cbd62acde83c14e9e81f0fbc41214 100644
--- a/theory/Cosmology/coordinates.tex
+++ b/theory/Cosmology/coordinates.tex
@@ -88,13 +88,13 @@ gravitational terms. SPH flavours that evolve the internal energy $u$ instead of
 entropy require the additional equation of motion describing the evolution of
 $u'$:
 \begin{equation}
-  \dot{u}_i' = \frac{P_i'}{\rho_i'^2}\left[3H\rho_i' + \frac{1}{a^2}f_i'\sum_jm_j\left(\mathbf{v}_i' -
-    \mathbf{v}_j'\right)\cdot\mathbf{\nabla}_i'W_{ij}'(h_i)\right],
+  \dot{u}_i' = \frac{1}{a^2}\frac{P_i'}{\rho_i'^2} f_i'\sum_jm_j\left(\mathbf{v}_i' -
+    \mathbf{v}_j'\right)\cdot\mathbf{\nabla}_i'W_{ij}'(h_i).
   \label{eq:cosmo_eom_u}
 \end{equation}
-where the first term in the brackets accounts for the change in energy
-due to the expansion of the Universe. The scale-factors appearing in
-the equations are later absorbed in the time-integration operators
+
+In all these cases, the scale-factors appearing in the equations are
+later absorbed in the time-integration operators
 (Sec.~\ref{ssec:operators}) such that the RHS of the equations of
 motions is identical for the primed quantities to the ones obtained in
 the non-cosmological case for the physical quantities.
diff --git a/theory/Cosmology/cosmology_standalone.tex b/theory/Cosmology/cosmology_standalone.tex
index 31a96d3a002aae423b2f8e16ef3044e357fdea6a..5b5fa228fe4cd1c5cfbd64a5ddb7a7ec466fa7a7 100644
--- a/theory/Cosmology/cosmology_standalone.tex
+++ b/theory/Cosmology/cosmology_standalone.tex
@@ -3,6 +3,7 @@
 \usepackage{amsmath,paralist,xcolor,xspace,amssymb}
 \usepackage{times}
 \usepackage{comment}
+\usepackage{bbold}
 \usepackage[super]{nth}
 
 \newcommand{\todo}[1]{{\textcolor{red}{#1}}}
@@ -42,6 +43,10 @@ Making cosmology great again.
 
 \input{timesteps}
 
+\input{artificialvisc}
+
+\input{gizmo}
+
 \bibliographystyle{mnras}
 \bibliography{./bibliography.bib}
 
diff --git a/theory/Cosmology/gizmo.tex b/theory/Cosmology/gizmo.tex
new file mode 100644
index 0000000000000000000000000000000000000000..785cb673bb0a4e60a843ae02d26d8a7ecbee9f74
--- /dev/null
+++ b/theory/Cosmology/gizmo.tex
@@ -0,0 +1,194 @@
+\subsection{Derivation of the GIZMO equations}
+
+For GIZMO and other finite volume/mass schemes, the equations of motion can no
+longer be derived from a Lagrangian. Instead, we have to explicitly transform
+the Euler equations to comoving coordinates and variables. In a physical
+coordinate frame, these equations are
+\begin{align}
+\frac{\partial{}\rho{}}{\partial{}t} +
+\nabla{} \cdot \left(\rho{} \mathbf{v}_{\rm tot} \right)
+&= 0, \\
+\frac{\partial{} \rho{} \mathbf{v}_{\rm tot} }{\partial{} t} +
+\nabla{} \cdot \left( \rho{} \mathbf{v}_{\rm tot} \mathbf{v}_{\rm tot} +
+P \mathbb{1} \right) &= 0.
+\end{align}
+\begin{multline}
+\frac{\partial{}}{\partial{} t} \left( \rho{} u + \frac{1}{2} \rho{}
+\mathbf{v}_{\rm{} tot}\cdot\mathbf{v}_{\rm{} tot} \right) +\\
+\nabla{} \cdot \left( \rho{} u \mathbf{v}_{\rm{} tot} +
+\frac{1}{2} \rho{} \left(\mathbf{v}_{\rm{} tot} \cdot \mathbf{v}_{\rm{} tot}\right)
+\mathbf{v}_{\rm{} tot} + P \mathbf{v}_{\rm{} tot} \right) = 0,
+\end{multline}
+where $\mathbb{1}$ is the unit tensor.
+For simplicity, we will rewrite the last two equations in terms of the quantities
+$\mathbf{v}_{\rm{} tot}$ and $u$:
+\begin{align}
+\frac{\partial{} \mathbf{v}_{\rm{} tot}}{\partial{} t} +
+\left(\mathbf{v}_{\rm{} tot} \cdot \nabla{} \right) \mathbf{v}_{\rm{} tot} +
+\frac{1}{\rho{}}\nabla{} P &= 0,\\
+\frac{\partial{} u}{\partial{} t} +
+\left(\mathbf{v}_{\rm{} tot} \cdot \nabla{} \right) u +
+\frac{P}{\rho{}} \nabla{} \cdot \mathbf{v}_{\rm{} tot} &= 0.
+\end{align}
+To convert to comoving coordinates, we need to take into account the
+appropriate operator transformations:
+\begin{align}
+\nabla{} &\rightarrow{} \frac{1}{a} \nabla{}', \\
+\frac{\partial{}}{\partial{} t} &\rightarrow{}
+\frac{\partial{}}{\partial{} t} - \frac{\dot{a}}{a} \mathbf{r}' \cdot \nabla{}',
+\end{align}
+the latter of which follows from the explicit time dependence of the
+comoving coordinate $\mathbf{r}'$. Substituting the definitions of the
+comoving variables and operators into the first Euler equation, we
+obtain
+\begin{equation}
+\frac{\partial{} \rho{}'}{\partial{} t} + \frac{1}{a^2} \nabla{}' \cdot \left(
+\rho{}' \mathbf{v}' \right) = 0,
+\end{equation}
+which is the same as the original continuity equation but now with an extra
+$1/a^2$ for the second term (the same correction factor that appears
+for the ``drift'' in the SPH case). For the velocity equation, we find
+\begin{multline}
+\frac{\partial{} \mathbf{v}'}{\partial{} t} + \frac{1}{a^2} \left( \mathbf{v}' \cdot
+\nabla{}' \right) \mathbf{v}' + \frac{1}{a^{3(\gamma{} - 1)}\rho{}'}
+\nabla{}' P' = \\
+- \nabla{}' \left( \frac{1}{2} a \ddot{a} \mathbf{r}'\cdot\mathbf{r}'
+\right).
+\end{multline}
+The right hand side of this equation is simply the extra term we absorb in the
+potential through the gauge transformation; the $1/a^2$ dependence in the
+second term on the left hand side again corresponds to the SPH ``drift'' factor,
+while the more complicated correction factor for the third term corresponds
+to the SPH ``kick'' factor in the equation of motion for the
+velocity. Finally, the thermal energy equation reduces to
+\begin{equation}
+\frac{\partial{} u'}{\partial{} t} + \frac{1}{a^2} \left( \mathbf{v}' \cdot
+\nabla{}' \right) u' + \frac{1}{a^2} \frac{P'}{\rho{}'} \nabla{}' \cdot \mathbf{v}'
+= 0.
+\end{equation}
+Again, this gives us the same correction factors as used by the SPH energy
+equation.\\
+
+Unfortunately, the system of equations above is no longer consistent with the
+original equations. For the continuity equation, we can transform to a new
+time coordinate $t'$ defined by
+\begin{equation}
+\dot{t}' = \frac{1}{a^2}, \quad{} t'(t) = \int \frac{dt}{a^2(t)} + {\rm{}const,}
+\end{equation}
+and end up with the original continuity equation in comoving variables. The
+same transformation brings the thermal energy equation into its original form
+in comoving variables. Unfortunately, the same does not work for the velocity
+equation due to the $a^{-3(\gamma{}-1)}$ factor in the third term (note that
+for the specific choice $\gamma{}=5/3$ this procedure would work).
+To get around this issue, we will rewrite the velocity equation as
+\begin{multline}
+\frac{\partial{} \mathbf{v}'}{\partial{} t} + \frac{1}{a^2} \left(
+\mathbf{v}' \cdot
+\nabla{}' \right) \mathbf{v}' + \frac{1}{a^2\rho{}'}
+\nabla{}' P' = \\
+- \nabla{}' \left( \frac{1}{2} a \ddot{a} \mathbf{r}'\cdot\mathbf{r}'
+\right) - \left(\frac{1}{a^{3(\gamma{} - 1)}} - \frac{1}{a^2} \right)
+\nabla{}'P'
+\end{multline}
+and treat the extra correction term on the right hand side as a source term,
+like we do for the gravitational acceleration. This means we end up with
+a fully consistent set of comoving Euler equations, so that we can solve the
+Riemann problem in the comoving frame.
+
+If we now convert the primitive Euler equations back to their conservative form,
+we end up with the following set of equations:
+\begin{equation}
+\frac{\partial{} \rho{}'}{\partial{} t} + \frac{1}{a^2} \nabla{}' \cdot \left(
+\rho{}' \mathbf{v}' \right) = 0,
+\end{equation}
+\begin{multline}
+\frac{\partial{} \rho{}' \mathbf{v}'}{\partial{} t} + \frac{1}{a^2}
+\nabla{}' \cdot \left( \rho{}' \mathbf{v}'\mathbf{v}' + P' \mathbb{1} \right) = \\
+- \rho{}' \nabla{}' \left( \frac{1}{2} a \ddot{a} \mathbf{r}'\cdot\mathbf{r}'
+\right) - \left(\frac{1}{a^{3(\gamma{} - 1)}} - \frac{1}{a^2} \right)
+\nabla{}'P',
+\end{multline}
+\begin{multline}
+\frac{\partial{}}{\partial{} t} \left( \rho{}' u' + \frac{1}{2} \rho{}'
+\mathbf{v}'\cdot\mathbf{v}' \right) +\\
+\frac{1}{a^2}\nabla{} \cdot \left( \rho{}' u' \mathbf{v}' +
+\frac{1}{2} \rho{}' \left(\mathbf{v}'\cdot \mathbf{v}'\right)
+\mathbf{v}' + P' \mathbf{v}' \right) =\\
+- \rho{}' \mathbf{v}'\cdot\nabla{}' \left( \frac{1}{2} a \ddot{a} \mathbf{r}'\cdot
+\mathbf{r}' \right) -
+\left(\frac{1}{a^{3(\gamma{} - 1)}} - \frac{1}{a^2} \right)
+\mathbf{v}'\cdot\nabla{}'P'.
+\end{multline}
+These equations tell us that the mass, comoving momentum and comoving total
+energy are conserved in the case of adiabatic expansion ($\gamma{} = 5/3$).
+For a more general $\gamma{}$, we however end up with extra source terms that
+depend on $\nabla{}'P'$. Since we already use this quantity for the gradient
+reconstruction step, adding this term is straightforward. The additional time
+step required to integrate the source term is
+\begin{multline}
+\Delta{} t_{\rm{} kick,c} \equiv \int_{a_n}^{a_{n+1}}
+\left(\frac{1}{a^{3(\gamma{} - 1)}} - \frac{1}{a^2} \right) dt \\ =
+\Delta{} t_{\rm{} kick,h} - \Delta{} t_{\rm{} drift}.
+\end{multline}
+
+The last issue we need to address is the appropriate scale factor for the
+gravitational correction term that is used by the finite volume flavour of
+GIZMO. Remember that in GIZMO we evolve the comoving conserved quantities. The
+evolution equations for the conserved quantities of particle $i$ are then
+simply given by integrating over the
+comoving ``volume'' of the particle and adding the appropriate correction terms
+(we ignore the comoving correction terms for this derivation):
+\begin{align}
+\frac{d m_i'}{dt} &= -\frac{1}{a^2} \sum_j
+\mathbf{F}_{m,ij}'\left(\rho{}'\mathbf{v}'\right),\\
+\frac{d \mathbf{p}_i'}{dt} &= -\frac{1}{a^2} \sum_j
+\mathbf{F}_{p,ij}'\left(\rho{}'\mathbf{v}'\mathbf{v}' +
+P'\mathbb{1}\right) - \frac{1}{a}\nabla{}'\phi{}_i',
+\end{align}
+\begin{multline}
+\frac{d E_i'}{dt} = -\frac{1}{a^2} \sum_j
+\mathbf{F}_{E,ij}'\left( \rho{}' u' \mathbf{v}' +
+\frac{1}{2} \rho{}' \left(\mathbf{v}'\cdot \mathbf{v}'\right)
+\mathbf{v}' + P' \mathbf{v}' \right) \\
+- \frac{1}{a} \mathbf{p}_i'\cdot{}\nabla{}'\phi{}_i',
+\end{multline}
+where $\mathbf{F}_{X,ij}'(Y)$ represents the appropriately geometrically evaluated
+flux $Y$ for conserved quantity $X$ between particle $i$ and particle $j$.
+In finite volume GIZMO, the particle
+velocity $\mathbf{v}_i' = \mathbf{w}_i' + \mathbf{v}_{i,{\rm{}rel}}'$ consists of the
+actual particle movement $\mathbf{w}_i'$ and the relative movement of the fluid
+w.r.t. the particle movement, $\mathbf{v}_{i,{\rm{}rel}}'$.
+We can therefore replace the gravitational contribution
+to the energy evolution with \citep{Springel2010}
+\begin{equation}
+\mathbf{p}_i'\cdot{}\nabla{}'\phi{}_i' \rightarrow{} m_i'\mathbf{w}_i' \cdot{}
+\nabla{}'\phi{}_i' + \int{} \rho{}'\left(\mathbf{v}' -
+\mathbf{w}_i' \right)\cdot{}
+\nabla{}'\phi{}' dV
+\end{equation}
+to get a more accurate update of the total energy. If we make the following
+approximation
+\begin{equation}
+\rho{}'\left(\mathbf{v}' - \mathbf{w}_i' \right) \approx{}
+\left(\mathbf{r}' - \mathbf{r}_i'\right) \nabla{}' \cdot{}
+\left( \rho{}' \left( \mathbf{v}' - \mathbf{w}_i' \right) \right)
+\end{equation}
+and assume that the force is constant over the ``volume'' of the particle, then
+the second term in the gravity contribution reduces to
+\begin{multline}
+\int{} \rho{}'\left(\mathbf{v}' -
+\mathbf{w}_i' \right)\cdot{}
+\nabla{}'\phi{}' dV \approx{} \\\sum_j \frac{1}{2}
+\left(\mathbf{r}_j' - \mathbf{r}_i'\right)
+a^2 \mathbf{F}_{m,ij}'\left(\rho{}'\mathbf{v}'\right) \cdot{}
+\nabla{}'\phi{}'_i.
+\end{multline}
+This means that the gravitational correction term will have a total scale factor
+dependence $a$ instead of the $1/a$ for the normal gravitational contribution
+and the $1/a^2$ for the hydrodynamical flux. We hence need an additional time
+step
+\begin{equation}
+\Delta{}t_{\rm{}kick,corr} = \int_{a_n}^{a_{n+1}} a\,dt = \frac{1}{H_0}
+\int_{a_n}^{a_{n+1}} \frac{da}{E(a)}
+\end{equation}
+that needs to be precomputed.
diff --git a/theory/Cosmology/operators.tex b/theory/Cosmology/operators.tex
index 89aa32bae554dceba8f1525cb209728a17154f5b..4ec4ea4b5fa49082295675420f562fa9e45e3e18 100644
--- a/theory/Cosmology/operators.tex
+++ b/theory/Cosmology/operators.tex
@@ -37,19 +37,21 @@ time using $\Delta t_{\rm kick,A} = \Delta t_{\rm
   operator. They then use $\int H dt$ as the operator, which
   integrates out trivially. This slight inconsistency with the rest of
   the time-integration operators is unlikely to lead to any practical
-  difference.}, whilst the change in energy due to the expansion of
-the Universe (first term in eq.~\ref{eq:cosmo_eom_u}) can be computed
-using
-\begin{equation}
-  \int_{a_n}^{a_{n+1}} H dt = \int_{a_n}^{a_{n+1}} \frac{da}{a} =
-  \log{a_{n+1}} - \log{a_n}.
-\end{equation}
+  difference.}. We additionally compute a few other terms
+appearing in some viscosity terms and subgrid models. There are the
+difference in cosmic time between the start and the end of the step
+and the corresponding change in redshift:
+\begin{align}
+  \Delta t_{\rm cosmic} &= \int_{a_n}^{a_{n+1}} dt = \frac{1}{H_0}
+  \int_{a_n}^{a_{n+1}} \frac{da}{a E(a)},\\
+  \Delta z &= \frac{1}{a_n} - \frac{1}{a_{n+1}} \approx -\frac{H}{a} \Delta t_{\rm cosmic}.
+\end{align}
 Following the same method as for the age of the Universe
-(sec. \ref{ssec:flrw}), the three non-trivial integrals are evaluated
-numerically at the start of the simulation for a series $10^4$ values
-of $a$ placed at regular intervals between $\log a_{\rm begin}$ and
-$\log a_{\rm end}$. The values for a specific pair of scale-factors
-$a_n$ and $a_{n+1}$ are then obtained by interpolating that table
-linearly.
+(sec. \ref{ssec:flrw}), these three non-trivial integrals are
+evaluated numerically at the start of the simulation for a series
+$10^4$ values of $a$ placed at regular intervals between $\log a_{\rm
+  begin}$ and $\log a_{\rm end}$. The values for a specific pair of
+scale-factors $a_n$ and $a_{n+1}$ are then obtained by interpolating
+that table linearly.
 
 
diff --git a/theory/Cosmology/timesteps.tex b/theory/Cosmology/timesteps.tex
index 0ad419d23bba3ecd1bd8703cd3a01e6b8985b4c1..a9856d03a2d04cc0c917176a828fc135a83fb745 100644
--- a/theory/Cosmology/timesteps.tex
+++ b/theory/Cosmology/timesteps.tex
@@ -9,31 +9,38 @@ hence requiring an additional conversion.
 
 \subsubsection{Maximal displacement}
 
-to prevent particles from moving on trajectories that do not include the effects
-of the expansion of the Universe, we compute a maximal time-step for the
-particles based on their RMS peculiar motion:
+to prevent particles from moving on trajectories that do not include
+the effects of the expansion of the Universe, we compute a maximal
+time-step for the particles based on their RMS peculiar motion and
+mean inter-particle separation:
 \begin{equation}
-  \Delta t_{\rm cosmo} \equiv \mathcal{C}_{\rm RMS} \frac{a^2 d_{\rm p}}{\sqrt{\frac{1}{N_{\rm p}}\sum_i | \mathbf{v}_i' |^2}},
+  \Delta t_{\rm cosmo} \equiv \mathcal{C}_{\rm RMS} \frac{a^2}{\sqrt{\frac{1}{N_{\rm p}}\sum_i | \mathbf{v}_i' |^2}} d_{\rm p},
   \label{eq:dt_RMS}
 \end{equation}
-where the sum runs over all particles of a species $p$, $\mathcal{C}_{\rm RMS}$
-is a free parameter, $N_{\rm p}$ is the number of baryonic or non-baryonic
-particles, and $d_{\rm p}$ is the mean inter-particle separation for the
-particle with the lowest mass $m_i$ of a given species:
+where the sum runs over all particles of a species $p$,
+$\mathcal{C}_{\rm RMS}$ is a free parameter, $N_{\rm p}$ is the number
+of baryonic or non-baryonic particles, and $d_{\rm p}$ is the mean
+inter-particle separation at redshift $0$ for the particle with the
+lowest mass $m_i$ of a given species:
 \begin{equation}
-  d_{\rm baryons} \equiv \left(\frac{m_i}{\Omega_{\rm b} \rho_{\rm crit}}\right)^{1/3}, \quad d_{\rm DM} \equiv \left(\frac{m_i}{\left(\Omega_{\rm m} - \Omega_{\rm b}\right) \rho_{\rm crit}}\right)^{1/3}.
+  d_{\rm baryons} \equiv \sqrt[3]{\frac{m_i}{\Omega_{\rm b} \rho_{\rm crit, 0}}}, \quad d_{\rm DM} \equiv \sqrt[3]{\frac{m_i}{\left(\Omega_{\rm m} - \Omega_{\rm b}\right) \rho_{\rm crit, 0}}}.
   \nonumber
 \end{equation}
-We typically use $\mathcal{C}_{\rm RMS} = 0.25$ and given the slow evolution of
-this maximal time-step size, we only re-compute it every time the tree is
-reconstructed.
+We typically use $\mathcal{C}_{\rm RMS} = 0.25$ and given the slow
+evolution of this maximal time-step size, we only re-compute it every
+time the tree is reconstructed.
 
-We also apply an additional criterion based on the smoothing scale of the forces
-computed from the top-level mesh.  In eq.~\ref{eq:dt_RMS}, we replace
-$d_{\rm p}$ by $a_{\rm smooth} \frac{L_{\rm box}}{N_{\rm mesh}}$, where we used
-the definition of the mesh parameters introduced earlier. Given the rather
-coarse mesh usually used in \swift, this time-step condition rarely dominates
-the overall time-step size calculation.
+We also apply an additional criterion based on the smoothing scale of
+the forces computed from the top-level mesh.  In eq.~\ref{eq:dt_RMS},
+we replace $d_{\rm p}$ by
+$a_{\rm smooth} \frac{L_{\rm box}}{N_{\rm mesh}}$, where we used the
+definition of the mesh parameters introduced earlier. Given the rather
+coarse mesh usually used in \swift, this time-step condition rarely
+dominates the overall time-step size calculation.
 
 \subsubsection{Conversion from time to integer time-line} 
 
+\begin{equation}
+  \int_{a_n}^{a_{n+1}} H dt = \int_{a_n}^{a_{n+1}} \frac{da}{a} =
+  \log{a_{n+1}} - \log{a_n}.
+\end{equation}
diff --git a/theory/Multipoles/bibliography.bib b/theory/Multipoles/bibliography.bib
index 077525a9e4db781ea58bd46ef2ba109d6c074be0..245a5223d43aff3ed871cc7ce278fb319d88a938 100644
--- a/theory/Multipoles/bibliography.bib
+++ b/theory/Multipoles/bibliography.bib
@@ -275,6 +275,21 @@ keywords = "adaptive algorithms"
   adsnote = {Provided by the SAO/NASA Astrophysics Data System}
 }
 
+@BOOK{Abramowitz1972,
+   author = {{Abramowitz}, M. and {Stegun}, I.~A.},
+    title = "{Handbook of Mathematical Functions}",
+booktitle = {Handbook of Mathematical Functions, New York: Dover, 1972},
+     year = 1972,
+   adsurl = {http://cdsads.u-strasbg.fr/abs/1972hmfw.book.....A},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
 
-
+@book{Hastings1955,
+ author = {Hastings, Cecil},
+ title = {Approximations for Digital Computers},
+ year = {1955},
+ isbn = {0691079145},
+ publisher = {Princeton University Press},
+ address = {Princeton, NJ, USA},
+} 
 
diff --git a/theory/SPH/Flavours/anarchy.tex b/theory/SPH/Flavours/anarchy.tex
new file mode 100644
index 0000000000000000000000000000000000000000..5924f9438f9b553298b0d45a8e4d7ddae9167270
--- /dev/null
+++ b/theory/SPH/Flavours/anarchy.tex
@@ -0,0 +1,123 @@
+\section{ANARCHY-SPH}
+\label{sec:sph:anarchy}
+
+This section is loosely based on Dalla Vecchia (\textit{in prep.}), also described in section 2.2.2 of
+\cite{Schaller2015}.\\
+
+The version of ANARCHY that is currently implemented in \swift{} is based on the Pressure-Energy
+(P-U) SPH scheme, rather than the original Pressure-Entropy. This was chosen to make the
+implementation of sub-grid physics easier, as well as injection of energy more accurate as
+no iteration of the entropy-energy relation is required.
+
+ANARCHY SPH comprises:
+\begin{itemize}
+	\item Pressure-Energy SPH
+	\item \citet[][henceforth C\&D]{cullen2010} variable artificial viscosity
+	\item A basic thermal diffusion term
+	\item The time-step limiter from \citet{durier2012}.
+\end{itemize}
+
+\subsection{Equations of Motion}
+
+The following smoothed quantities are required, and are calculated in the density loop:
+\begin{itemize}
+	\item $\rho_i = [h_i^{-n_d}]\sum_j m_j w_{i}$
+	\item $(d\rho/dh)_i = - [h_i^{-n_d - 1}]\sum_j m_j ( n_d * w_i + x_i \nabla_i w_i)$
+	\item $\bar{P}_i = [(\gamma - 1)h_i^{-n_d}]\sum_j m_j u_j w_{i}$
+	\item $(d\bar{P}/dh)_i = - [(\gamma - 1)h_i^{-n_d - 1}]\sum_j m_j u_j ( n_d * w_i + x_i \nabla_i w_i)$
+	\item $n_i = [h_i^{-n_d}]\sum_j w_{i}$
+	\item $(dn/dh)_i = - [h_i^{-n_d - 1}]\sum_j ( n_d * w_i + x_i \nabla_i w_i)$
+	\item $(\nabla \cdot \mathbf{v})_i = - [a^{-2} \rho_i^{-1} h^{-n_d - 1}]\sum_j m_j \mathbf{v}_{ij} \cdot \tilde{\mathbf{x}}_{ij} \nabla_i w_i$
+	% Think the cosmo factor entering here is wrong...
+	\item $(\nabla \times \mathbf{v})_i = - [a^{-2} \rho_i^{-1} h^{-n_d - 1} + Hn_d]\sum_j m_j \mathbf{v}_{ij} \times \tilde{\mathbf{x}}_{ij} \nabla_i w_i$
+\end{itemize}
+with quantities in square brackets added in {\tt hydro\_end\_density} and:
+\begin{itemize}
+	\item $h_i$ the smoothing length of particle $i$
+	\item $n_d$ the number of hydro dimensions
+	\item $m_j$ the mass of particle $j$
+	\item $w_{i}$ the dimensionless kernel evaluated at $x_i = r / h_i$
+	\item $r$ the interparticle separation of particles $i$ and $j$
+	\item $u_i$ the internal energy of the particle
+	\item $\gamma$ the ratio of specific heats
+	\item $\mathbf{v}_{ij}$ the difference between the velocities of particle $i$ and $j$
+	\item $\tilde{\mathbf{x}}_{ij}$ the unit vector connecting particles $i$ and $j$
+	\item $a$ the current scale factor
+	\item $H$ the current Hubble constant
+\end{itemize}
+
+The ANARCHY scheme requires a gradient loop, as intermediate smoothed quantities are required
+for the artificial viscosity and diffusion schemes. The following quantities are calculated:
+\begin{itemize}
+	\item $v_{{\rm sig}, i} = \rm{max}(v_{\rm sig}, c_i + c_j - 3\mathbf{v}_{ij} \cdot \tilde{\mathbf{x}}_{ij})$
+	\item $\nabla^2 u_i = [2]\sum_j m_j \frac{u_i - u_j}{\rho_j} \frac{\nabla_i W_i}{r_{ij}}$
+\end{itemize}
+with quantities in square brackets added in {\tt hydro\_end\_gradient} and:
+\begin{itemize}
+	\item $v_{\rm sig}$ the signal velocity
+	\item $c_i$ the sound speed of particle $i$
+\end{itemize}
+
+In {\tt hydro\_prepare\_force}, the differential equations for the viscosity and
+diffusion schemes are integrated as follows. This includes some logic, so it is
+split into viscosity:
+\begin{itemize}
+	\item $\tau_i = h_i / (2 v_{{\rm sig}, i} \ell)$
+	\item $\dot{\nabla \cdot \mathbf{v}_i} =
+	       \left({\nabla \cdot \mathbf{v}_i}(t+dt) - {\nabla \cdot \mathbf{v}_i}(t)\right)
+	       / dt$
+	\item $S_i = h_i^2 {\rm max}(0, -\dot{\nabla \cdot \mathbf{v}_i})$
+	\item $\alpha_{{\rm loc}, i} = \alpha_{\rm max} S_i / (S_i + v_{{\rm sig}, i}^2)$.
+\end{itemize}
+and diffusion:
+\begin{itemize}
+	\item $\dot{\tilde{\alpha}}_i = \beta h_i \frac{\nabla^2 u_i}{\sqrt{u_i}}$
+\end{itemize}
+where:
+\begin{itemize}
+	\item $\alpha_i$ is the viscosity coefficient
+	\item $\tilde{\alpha}_i$ is the diffusion coefficient
+	\item $\tau_i$ is the timescale for decay
+	\item $\ell$ is the viscosity length coefficient
+	\item $\beta$ is the diffusion length coefficient
+\end{itemize}
+The equations are then integrated as follows for viscosity:
+\begin{enumerate}
+	\item If $\alpha_{\rm loc} > \alpha_i$, update $\alpha_i$ to $\alpha_{\rm loc}$
+	      immediately.
+	\item Otherwise, decay the viscosity, with $\dot{\alpha}_i = (\alpha_{\rm loc} - \alpha_i) / \tau_i$.
+	      This equation is integrated with the same time-step as the velocity divergence derivative
+	      uses, and is the same time-step used for the cooling.
+	\item Finally, if $\alpha_i < \alpha_{\rm min}$, $\alpha_i$ is reset to that minimal
+	      value.
+\end{enumerate}
+and for diffusion:
+\begin{enumerate}
+	\item First, find the new diffusion coefficient, $\tilde{\alpha}_i(t+dt) = 
+	      \tilde{\alpha}_i(t) + \dot{\tilde{\alpha}}_i \cdot dt$, using the
+	      same time-step as for the viscosity.
+	\item If this is outside of the bounds set for the coefficient, set it
+	      to the respective upper or lower bound.
+\end{enumerate}
+The final force loop calculates the equations of motion for the particles ready for
+their time-integration. The following quantities are calculated:
+\begin{itemize}
+	\item $\mathbf{a}_{\rm hydro} = -\sum_j m_j u_i u_j (\gamma - 1)^2 \left(
+	       \frac{f_{ij}}{\bar{P}_i} \nabla_i W_i + \frac{f_{ji}}{\bar{P}_j} \nabla_j W_j\right)$
+	\item $\mathbf{a}_{\rm visc} = - \frac{1}{8}\sum_j (\alpha_i + \alpha_j) v_{{\rm sig}, i}
+	       \mu_{ij} (b_i + b_j) (\nabla_i W_i + \nabla_j W_j)/ (\rho_i + \rho_j)$
+	\item $\dot{u}_{ij, {\rm hydro}} = \sum_j m_j u_i u_j (\gamma - 1)^2
+	       \frac{f_{ij}}{\bar{P}_i} \nabla_i W_i$
+	\item $\dot{u}_{ij, {\rm visc}} = \frac{1}{2} \mathbf{a}_{\rm visc} (\mathbf{v}_{ij} \cdot \tilde{\mathbf{x}}_{ij} + r^2a^2 H)$
+	\item $v_{{\rm diff}, i} = {\rm max}(0, c_i + c_j + \mathbf{v}_{ij} \cdot \tilde{\mathbf{x}}_{ij} + r^2a^2 H)$
+	\item $\dot{u}_{ij, {\rm diff}} = \frac{1}{2}(\tilde{\alpha}_i + \tilde{\alpha}_j) a^{(3\gamma - 5)/2}
+	       v_{{\rm diff}, i} (u_i - u_j) (\nabla_i W_i + \nabla_j W_j)/ (\rho_i + \rho_j) $
+	\item $\dot{u}_i = \sum_j \dot{u}_{ij, {\rm hydro}} +  \dot{u}_{ij, {\rm visc}} + \dot{u}_{ij, {\rm diff}}$
+\end{itemize}
+where:
+\begin{itemize}
+	\item $f_{ij}$ are the variable smoothing length correction factors
+	\item $b_i$ is the Balsara switch for particle $i$
+	\item $\mu_{ij} = a^{(3\gamma - 5)/2} {\rm min}(\mathbf{v}_{ij} \cdot \tilde{\mathbf{x}}_{ij} + r^2a^2 H, 0)$
+\end{itemize}
+
diff --git a/theory/SPH/Flavours/bibliography.bib b/theory/SPH/Flavours/bibliography.bib
index 2bc11dacca90fe03d05c2e847503105d80eb1317..02ebed25a407ae5adba87d9f46d3f004bf9fbae2 100644
--- a/theory/SPH/Flavours/bibliography.bib
+++ b/theory/SPH/Flavours/bibliography.bib
@@ -97,4 +97,17 @@ archivePrefix = "arXiv",
 
 
 
+@article{Morris1997,
+abstract = {Smoothed particle hydrodynamics is a Lagrangian particle method for fluid dynamics which simulates shocks by using an artificial viscosity. Unlike Eulerian methods it is not convenient to reduce the effects of viscosity by means of switches based on spatial gradients. In this paper we introduce the idea of time-varying coefficients which fits more naturally with a particle formulation. Each particle has a viscosity parameter which evolves according to a simple source and decay equation. The source causes the parameter to grow when the particle enters a shock and the decay term causes it to decay to a small value beyond the shock. Tests on one-dimensional shocks and a two-dimensional shock-bubble interaction confirm that the method gives good results. {\textcopyright} 1997 Academic Press.},
+author = {Morris, J. P. and Monaghan, J. J.},
+doi = {10.1006/jcph.1997.5690},
+isbn = {0021-9991},
+issn = {00219991},
+journal = {Journal of Computational Physics},
+number = {1},
+pages = {41--50},
+title = {{A switch to reduce SPH viscosity}},
+volume = {136},
+year = {1997}
+}
 
diff --git a/theory/SPH/Flavours/sph_flavours.tex b/theory/SPH/Flavours/sph_flavours.tex
index 5d62af3aab777e66f0b33b89e861d2b21e10b38c..d84bdcc0b42129e9d5008051dd6e0e212e4e9463 100644
--- a/theory/SPH/Flavours/sph_flavours.tex
+++ b/theory/SPH/Flavours/sph_flavours.tex
@@ -590,8 +590,39 @@ both sides, such that
 
 %##############################################################################
 
-\subsection{Anarchy SPH}
-Dalla Vecchia (\textit{in prep.}), also described in section 2.2.2 of
-\cite{Schaller2015}.\\
-\label{sec:sph:anarchy}
-\tbd 
+\subsection{Variable artificial viscosity}
+
+Here we consider a modified version of the Pressure-Energy scheme described
+above but one that uses a variable artificial viscosity. The prescription used
+in this scheme was originally introduced by \citet{Morris1997} and is almost
+identical to the above equations, but tracks an individual viscosity parameter
+$\alpha_i$ for each particle. This viscosity is then updated each time-step to
+a more appropriate value. The hope is that the artificial viscosity will be
+high in regions that contain shocks, but as low as possible in regions where it
+is unnecessary such as shear flows. This is already accomplished somewhat with
+the inclusion of a \citet{Balsara1995} switch, but a fixed $\alpha$ still leads
+to spurious transport of angular momentum and vorticity.
+
+The equation governing the growth of the viscosity is
+\begin{align}
+  \frac{\mathrm{d} \alpha_i}
+       {\mathrm{d} t} = 
+  - (\alpha_i - \alpha_{\rm min}) \ell \frac{c_{s, i}}{h},
+  \label{eq:sph:pu:alphadt}
+\end{align}
+with $\alpha_{\rm min}=0.1$ the minimal artificial viscosity parameter, and
+$\ell=0.1$ the viscosity ``length'' that governs how quickly the viscosity
+decays. This equation is solved implicitly in a similar way to
+$\mathrm{d}\mathbf{v}/ \mathrm{d}t$ and $\mathrm{d}u/\mathrm{d}t$ - i.e.
+$\alpha_{i} (t+\Delta t_i) = \alpha_{i}(t) + \dot{\alpha}_i \Delta t_i$.
+
+To ensure that the scheme is conservative, the viscosity coefficients must be
+combined in a fully conservative way; this is performed by taking the mean
+viscosity parameter of the two particles that are being interacted, such that
+\begin{align}
+  \alpha_{ij} = \frac{\alpha_i + \alpha_j}{2}.
+\end{align}
+The rest of the artificial viscosity implementation, including the
+\citet{Balsara1995} switch, is the same - just with $\alpha \rightarrow
+\alpha_{ij}$.
+
diff --git a/theory/SPH/Flavours/sph_flavours_standalone.tex b/theory/SPH/Flavours/sph_flavours_standalone.tex
index 20c9f1451c2499d661bfbb1022bfd34c02fde4dd..7cc92fdb438ea09916dfbd12bfb554ddb9fc2ecd 100644
--- a/theory/SPH/Flavours/sph_flavours_standalone.tex
+++ b/theory/SPH/Flavours/sph_flavours_standalone.tex
@@ -24,6 +24,7 @@
 
 \maketitle
 \input{sph_flavours}
+\input{anarchy}
 
 \bibliographystyle{mnras}
 \bibliography{./bibliography}
diff --git a/theory/SPH/swift_sph.tex b/theory/SPH/swift_sph.tex
index e9c185c3cd0b845bff75be2092846bffbdcfd1a9..51ab6c3e49ae6b74d8b63590caeadd962cd6e4d5 100644
--- a/theory/SPH/swift_sph.tex
+++ b/theory/SPH/swift_sph.tex
@@ -34,6 +34,7 @@
 
 \section{SPH flavours}
 \input{Flavours/sph_flavours}
+\input{Flavours/anarchy}
 
 \bibliographystyle{mnras}
 \bibliography{./bibliography}
diff --git a/theory/Star_Formation/bibliography.bib b/theory/Star_Formation/bibliography.bib
new file mode 100644
index 0000000000000000000000000000000000000000..95a3678d868a3229075bb98db1f1f6db3d9b05c3
--- /dev/null
+++ b/theory/Star_Formation/bibliography.bib
@@ -0,0 +1,86 @@
+@ARTICLE{schaye2008,
+   author = {{Schaye}, J. and {Dalla Vecchia}, C.},
+    title = "{On the relation between the Schmidt and Kennicutt-Schmidt star formation laws and its implications for numerical simulations}",
+  journal = {\mnras},
+archivePrefix = "arXiv",
+   eprint = {0709.0292},
+ keywords = {stars: formation , galaxies: evolution , galaxies: formation , galaxies: ISM},
+     year = 2008,
+    month = jan,
+   volume = 383,
+    pages = {1210-1222},
+      doi = {10.1111/j.1365-2966.2007.12639.x},
+   adsurl = {http://adsabs.harvard.edu/abs/2008MNRAS.383.1210S},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+@ARTICLE{schaye2004,
+   author = {{Schaye}, J.},
+    title = "{Star Formation Thresholds and Galaxy Edges: Why and Where}",
+  journal = {\apj},
+   eprint = {astro-ph/0205125},
+ keywords = {Galaxies: Evolution, Galaxies: Formation, Galaxies: ISM, ISM: Clouds, Stars: Formation},
+     year = 2004,
+    month = jul,
+   volume = 609,
+    pages = {667-682},
+      doi = {10.1086/421232},
+   adsurl = {http://adsabs.harvard.edu/abs/2004ApJ...609..667S},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+@ARTICLE{kennicutt1998,
+   author = {{Kennicutt}, Jr., R.~C.},
+    title = "{The Global Schmidt Law in Star-forming Galaxies}",
+  journal = {\apj},
+   eprint = {astro-ph/9712213},
+ keywords = {GALAXIES: EVOLUTION, GALAXIES: ISM, GALAXIES: SPIRAL, GALAXIES: STELLAR CONTENT, GALAXIES: STARBURST, STARS: FORMATION, Galaxies: Evolution, Galaxies: ISM, Galaxies: Spiral, Galaxies: Starburst, Galaxies: Stellar Content, Stars: Formation},
+     year = 1998,
+    month = may,
+   volume = 498,
+    pages = {541-552},
+      doi = {10.1086/305588},
+   adsurl = {http://adsabs.harvard.edu/abs/1998ApJ...498..541K},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+@ARTICLE{dallavecchia2012,
+   author = {{Dalla Vecchia}, C. and {Schaye}, J.},
+    title = "{Simulating galactic outflows with thermal supernova feedback}",
+  journal = {\mnras},
+archivePrefix = "arXiv",
+   eprint = {1203.5667},
+ keywords = {methods: numerical, ISM: bubbles, ISM: jets and outflows, galaxies: evolution, galaxies: formation, galaxies: ISM },
+     year = 2012,
+    month = oct,
+   volume = 426,
+    pages = {140-158},
+      doi = {10.1111/j.1365-2966.2012.21704.x},
+   adsurl = {http://adsabs.harvard.edu/abs/2012MNRAS.426..140D},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+
+@ARTICLE{schaye2015,
+   author = {{Schaye}, J. and {Crain}, R.~A. and {Bower}, R.~G. and {Furlong}, M. and 
+	{Schaller}, M. and {Theuns}, T. and {Dalla Vecchia}, C. and 
+	{Frenk}, C.~S. and {McCarthy}, I.~G. and {Helly}, J.~C. and 
+	{Jenkins}, A. and {Rosas-Guevara}, Y.~M. and {White}, S.~D.~M. and 
+	{Baes}, M. and {Booth}, C.~M. and {Camps}, P. and {Navarro}, J.~F. and 
+	{Qu}, Y. and {Rahmati}, A. and {Sawala}, T. and {Thomas}, P.~A. and 
+	{Trayford}, J.},
+    title = "{The EAGLE project: simulating the evolution and assembly of galaxies and their environments}",
+  journal = {\mnras},
+archivePrefix = "arXiv",
+   eprint = {1407.7040},
+ keywords = {methods: numerical, galaxies: evolution, galaxies: formation, cosmology: theory},
+     year = 2015,
+    month = jan,
+   volume = 446,
+    pages = {521-554},
+      doi = {10.1093/mnras/stu2058},
+   adsurl = {http://adsabs.harvard.edu/abs/2015MNRAS.446..521S},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+
diff --git a/theory/Star_Formation/run.sh b/theory/Star_Formation/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7608ef7cb7e5ecc07a38a773e550cb42beec6fb7
--- /dev/null
+++ b/theory/Star_Formation/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "Generating PDF..."
+pdflatex -jobname=starform starformation_standalone.tex
+bibtex starform.aux
+pdflatex -jobname=starform starformation_standalone.tex
+pdflatex -jobname=starform starformation_standalone.tex
diff --git a/theory/Star_Formation/starformation.tex b/theory/Star_Formation/starformation.tex
new file mode 100644
index 0000000000000000000000000000000000000000..c1b8d511ce2f3cded2850a9b30a696dd76b8b47f
--- /dev/null
+++ b/theory/Star_Formation/starformation.tex
@@ -0,0 +1,122 @@
+\section{Star Formation in EAGLE}
+
+In this section we will shortly explain how the star formation in EAGLE works.
+The implemented star formation is based on \citet{schaye2008}; instead of 
+the constant density threshold used by \citet{schaye2008}, a metallicity 
+dependent density threshold is used, following \citet{schaye2004}. An important 
+property of the implemented star formation law is the explicit reproducibility 
+of the Kennicutt-Schmidt star formation law \citep{kennicutt1998}:
+\begin{align}
+ \dot{\Sigma}_\star &= A \left( \frac{\Sigma}{1 ~\text{M}_\odot ~\text{pc}^{-2}} \right)^n
+\end{align}
+
+\noindent In which $A$ is the normalization of the Kennicutt-Schmidt, $\dot{\Sigma}_\star$
+is the surface density of newly formed stars, $\Sigma$ is the gas surface 
+density and $n$ is the power law index. In the case of the star formation 
+implementation of \citet{schaye2008}, the star formation law is given by 
+a pressure law:
+
+\begin{align}
+\dot{m}_\star &= m_g A ( 1~\text{M}_\odot~\text{pc}^{-2})^{-n} \left( 
+\frac{\gamma}{G} f_g P \right)^{(n-1)/2}.
+\end{align}
+
+\noindent In which $m_g$ is the gas particle mass, $\gamma$ is the ratio of specific heats,
+$G$ is the gravitational constant, $f_g$ is the mass fraction of gas (unity in 
+EAGLE), and $P$ is the total pressure of the gas particle. In this equation
+$A$ and $n$ are directly constrained from the observations of the Kennicutt-
+Schmidt law so both variables do not require tuning. Further there are 
+two constraints on the overdensity, which should be $\Delta > 57.7$ (why this 
+specific number? \citet{schaye2008} says $\Delta \approx 60$), and the
+temperature of the gas should be at least $T_\text{crit}<10^5 ~\text{K}$.
+
+Besides this it is required that there is an effective equation of state. 
+Specifically we could take this to be equal to:
+\begin{align}
+ P &= P_\text{eos} (\rho) = P_\text{tot,c}\left( \frac{\rho_\text{g}}{\rho_\text{g,c}} \right)^{\gamma_\text{eff}}.
+\end{align}
+\noindent In which $\gamma_\text{eff}$ is the polytropic index. But the EAGLE 
+code just uses the EOS of the gas?
+
+\noindent Using this it is possible to calculate the probability that a gas particle is 
+converted to a star particle:
+\begin{align}
+ \text{Prob.} = \text{min} \left( \frac{\dot{m}_\star \Delta t}{m_g}, 1 \right) 
+ = \text{min} \left( A \left( 1 ~\text{M}_\odot ~\text{pc}^{-2} \right)^{-n} \left( \frac{\gamma}{G} f_g P_\text{tot} \right)^{(n-1)/2}, 1 \right).
+\end{align}
+
+\noindent In general we use $A=1.515 \cdot 10^{-4}~\text{M}_\odot ~\text{yr}^{-1} ~\text{kpc}^{-2}$ 
+and $n=1.4$. In the case of high densities ($n_\text{H,thresh} > 10^3 ~\text{cm}^{-3}$),
+the power law will be steeper and have a value of $n=2$ \citep{schaye2015}. This will also adjust
+the normalization of the star formation law, both need to be equal at the 
+pressure with a corresponding density. This means we have:
+\begin{align}
+\begin{split}
+ A \left( 1 ~\text{M}_\odot ~\text{pc}^{-2} \right)^{-n} \left( \frac{\gamma}{G} f_g P_\text{tot} \right)^{(n-1)/2} \\
+ = A_\text{high} \left( 1 ~\text{M}_\odot ~\text{pc}^{-2} \right)^{-n_\text{high}} \left( \frac{\gamma}{G} f_g P_\text{tot} \right)^{(n_\text{high}-1)/2}. 
+\end{split}
+\end{align}
+\begin{align}
+A_\text{high} = A \left( 1 ~\text{M}_\odot ~\text{pc}^{-2} \right)^{n_\text{high}-n} \left( \frac{\gamma}{G} f_g P_\text{tot}(\rho_{hd}) \right)^{(n-n_\text{high})/2}. 
+\end{align}
+In which $\rho_{hd}$ is the density at which both laws are equal.
+
+This differs from the EAGLE code ($f_g=1$), which uses:
+\begin{align}
+A_\text{high} = A \left( \frac{\gamma}{G} P_\text{tot} (\rho_{hd}) \right)^{(n-n_\text{high})/2} . 
+\end{align}
+
+Besides this we also use the metallicity dependent density threshold given by \citep{schaye2004}:
+\begin{align}
+n^*_\text{H} (Z) &= n_\text{H,norm} \left( \frac{Z}{Z_0} \right)^{n_z}.
+\end{align}
+In which $n_\text{H,norm}$ is the normalization of the metallicity dependent 
+star formation law, $Z$ the metallicity, $Z_0$ the normalization metallicity,
+and $n_Z$ the power law of the metallicity dependence on density. Standard 
+values we take for the EAGLE are $n_\text{H,norm} = 0.1 ~\text{cm}^{-3}$, 
+$n_Z=-0.64$ and $Z_0 = 0.002$. Also we impose that the density threshold cannot
+exceed the maximum value of $n_\text{H,max,norm}$ \citep{schaye2015}.
+
+For the initial pressure determination the EAGLE code uses (Explanation needed):
+\begin{align}
+ P_\text{cgs} &= (\gamma -1) \frac{n_\text{EOS, norm} \cdot m_H}{X} T_{EOS,jeans} \cdot \frac{k_B}{1.22 \cdot (\gamma -1) m_H } \left( \frac{n_\text{highden}}{n_\text{norm,EOS}} \right)^{\gamma_\text{eff}}.
+\end{align}
+
+To determine the pressure for the star formation law the EAGLE code uses the 
+physical pressure? Is this the effective EOS or the real EOS of the gas?
+
+Compared to the EAGLE code we can calculate a fraction of the calculations already 
+in the struct which are not depending on time, this may save some calculations. 
+
+Besides this we also use the more extended temperature criteria proposed by
+\citet{dallavecchia2012} that uses a temperature floor given by:
+\begin{align}
+ \log_{10} T < \log_{10} T_\text{eos} + 0.5.
+\end{align}
+
+\begin{table}
+\begin{tabular}{l|l|l|l}
+Variable & Parameter file name   & Default value & unit \\ \hline
+$A$    & SchmidtLawCoeff\_MSUNpYRpKPC2   & $1.515\cdot10^{-4}$    & $M_\odot ~yr^{-1} ~kpc^{-2}$ \\
+$n$  & SchmidtLawExponent                & $1.4$         & none  \\
+$\gamma$  & gamma   & $\frac{5}{3}$ & none   \\
+$G$  & No, in constants   & -  & -  \\
+$f_g$ & fg   & $1.$    & none  \\
+$n_{high}$   & SchmidtLawHighDensExponent  & $2.0$  & none  \\
+$n_{H,thresh}$ & SchmidtLawHighDens\_thresh\_HpCM3 & $10^3$ & $cm^{-3}$ \\
+$n_{H,norm}$ & thresh\_norm\_HpCM3 & $.1$ & $cm^{-3}$ \\
+$Z_0$ & MetDep\_Z0 & $0.002$ & none \\
+$n_Z$ & MetDep\_SFthresh\_Slope & $-0.64$ & none \\
+$\Delta$ & thresh\_MinOverDens & $57.7$ & none \\
+$T_{crit}$ & thresh\_temp & $10^5$ & $K$ \\
+$n_{H,max,norm}$ & thresh\_max\_norm\_HpCM3 & 10.0 & $cm^{-3}$ 
+\end{tabular}
+\end{table}
+
+Questions:
+Is rho in part mass density or number density??
+Why is the cooling\_get\_temperature() depending on so many variables?
+I would expect $P_\text{tot,c} = n k_B T$, but which $n$ and $T$?
+Is Seed in the function or declared outside of function?
+Correct Unit conversion?
+
diff --git a/theory/Star_Formation/starformation_standalone.tex b/theory/Star_Formation/starformation_standalone.tex
new file mode 100644
index 0000000000000000000000000000000000000000..518179a393f7c35ce56091e6b74eaccb133c65de
--- /dev/null
+++ b/theory/Star_Formation/starformation_standalone.tex
@@ -0,0 +1,43 @@
+\documentclass[fleqn, usenatbib, useAMS, a4paper]{mnras}
+\usepackage{graphicx}
+\usepackage{amsmath,paralist,xcolor,xspace,amssymb}
+\usepackage{times}
+\usepackage{comment}
+\usepackage[super]{nth}
+
+\newcommand{\todo}[1]{{\textcolor{red}{#1}}}
+\newcommand{\gadget}{{\sc Gadget}\xspace}
+\newcommand{\swift}{{\sc Swift}\xspace}
+\newcommand{\nbody}{$N$-body\xspace}
+\newcommand{\Lag}{\mathcal{L}}
+
+%opening
+\title{Star formation equations in SWIFT}
+\author{Folkert Nobels}
+\begin{document}
+
+\date{\today}
+
+\pagerange{\pageref{firstpage}--\pageref{lastpage}} \pubyear{2018}
+
+\maketitle
+
+\label{firstpage}
+
+\begin{abstract}
+Making stars all over again.
+\end{abstract}
+
+\begin{keywords}
+\end{keywords}
+
+\input{starformation}
+
+
+
+\bibliographystyle{mnras}
+\bibliography{./bibliography}
+
+\label{lastpage}
+
+\end{document}
diff --git a/tools/Makefile.am b/tools/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..5b075aa8241977ac5545bb1345adb5325a6bb6df
--- /dev/null
+++ b/tools/Makefile.am
@@ -0,0 +1,18 @@
+# Scripts to plot task graphs
+EXTRA_DIST = task_plots/plot_tasks.py task_plots/analyse_tasks.py \
+	     task_plots/process_plot_tasks_MPI task_plots/process_plot_tasks
+
+# Scripts to plot threadpool 'task' graphs
+EXTRA_DIST += task_plots/analyse_threadpool_tasks.py \
+              task_plots/plot_threadpool.py \
+              task_plots/process_plot_threadpool
+
+# Script for scaling plot
+EXTRA_DIST += plot_scaling_results.py \
+              plot_scaling_results_breakdown.py
+
+# Script for gravity accuracy
+EXTRA_DIST += plot_gravity_checks.py
+
+# Combine ICs.
+EXTRA_DIST += combine_ics.py
diff --git a/examples/analyse_dump_cells.py b/tools/analyse_dump_cells.py
similarity index 85%
rename from examples/analyse_dump_cells.py
rename to tools/analyse_dump_cells.py
index 2adfaf319e9c0da33f86a6158da68e6620c47361..2216b5f5fe6aa0c0d9dcc29a8abf0f263d2c3cc4 100755
--- a/examples/analyse_dump_cells.py
+++ b/tools/analyse_dump_cells.py
@@ -47,13 +47,13 @@ mpicol = 20
 
 #  Command-line arguments.
 if len(sys.argv) < 5:
-    print "usage: ", sys.argv[0], " nx ny nz cell1.dat cell2.dat ..."
+    print("usage: ", sys.argv[0], " nx ny nz cell1.dat cell2.dat ...")
     sys.exit(1)
 nx = int(sys.argv[1])
 ny = int(sys.argv[2])
 nz = int(sys.argv[3])
 
-print "# x y z onedge"
+print("# x y z onedge")
 allactives = []
 onedge = 0
 tcount = 0
@@ -65,28 +65,28 @@ for i in range(4, len(sys.argv)):
         continue
 
     #  Select cells that are on the current rank and are top-level cells.
-    rdata = data[data[:,localcol] == 1]
-    tdata = rdata[rdata[:,topcol] == 1]
+    rdata = data[data[:, localcol] == 1]
+    tdata = rdata[rdata[:, topcol] == 1]
 
     #  Separation of the cells is in data.
-    xwidth = tdata[0,xwcol]
-    ywidth = tdata[0,ywcol]
-    zwidth = tdata[0,zwcol]
+    xwidth = tdata[0, xwcol]
+    ywidth = tdata[0, ywcol]
+    zwidth = tdata[0, zwcol]
 
     #  Fill space nx, ny,n nz with all toplevel cells and flag their active
     #  state.
-    space = np.zeros((nx,ny,nz))
+    space = np.zeros((nx, ny, nz))
     actives = []
     for line in tdata:
         ix = int(np.rint(line[xcol] / xwidth))
         iy = int(np.rint(line[ycol] / ywidth))
         iz = int(np.rint(line[zcol] / zwidth))
         active = int(line[activecol])
-        space[ix,iy,iz] = 1 + active
+        space[ix, iy, iz] = 1 + active
         tcount = tcount + 1
         if active == 1:
             actives.append([ix, iy, iz, line])
-    
+
     #  Report all active cells and flag any without 26 neighbours. These are
     #  on the edge of the partition volume and will have foreign neighbour
     #  cells.
@@ -116,13 +116,12 @@ for i in range(4, len(sys.argv)):
                         count = count + 1
         if count < 27:
             onedge = onedge + 1
-            print active[3][0], active[3][1], active[3][2], 1
+            print(active[3][0], active[3][1], active[3][2], 1)
         else:
-            print active[3][0], active[3][1], active[3][2], 0
+            print(active[3][0], active[3][1], active[3][2], 0)
 
     allactives.extend(actives)
 
-print "# top cells: ", tcount, " active: ", len(allactives), " on edge: ", onedge
+print("# top cells: ", tcount, " active: ", len(allactives), " on edge: ", onedge)
 
 sys.exit(0)
-
diff --git a/tools/analyse_runtime.py b/tools/analyse_runtime.py
new file mode 100755
index 0000000000000000000000000000000000000000..fda47afc946405c475b3d3809a2cfd1a2d28d656
--- /dev/null
+++ b/tools/analyse_runtime.py
@@ -0,0 +1,237 @@
+#!/usr/bin/env python
+
+################################################################################
+# This file is part of SWIFT.
+# Copyright (c) 2018 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+################################################################################
+
+import re
+import sys
+import matplotlib
+
+matplotlib.use("Agg")
+from pylab import *
+
+# Plot parameters
+params = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "text.usetex": True,
+    "figure.figsize": (6.45, 6.45),
+    "figure.subplot.left": 0.06,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.06,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.21,
+    "figure.subplot.hspace": 0.13,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+    "text.latex.unicode": True,
+}
+rcParams.update(params)
+
+threshold = 0.008
+
+num_files = len(sys.argv) - 1
+
+labels = [
+    ["Gpart assignment", 1],
+    ["Mesh comunication", 1],
+    ["Forward Fourier transform", 1],
+    ["Green function", 1],
+    ["Backwards Fourier transform", 1],
+    ["engine_recompute_displacement_constraint:", 1],
+    ["engine_exchange_top_multipoles:", 1],
+    ["updating particle counts", 1],
+    ["engine_estimate_nr_tasks:", 1],
+    ["Making gravity tasks", 1],
+    ["Making hydro tasks", 1],
+    ["Splitting tasks", 1],
+    ["Counting and linking tasks", 1],
+    ["Setting super-pointers", 1],
+    ["Making extra hydroloop tasks", 1],
+    ["Making extra starsloop tasks", 1],
+    ["Linking gravity tasks", 1],
+    ["Creating send tasks", 1],
+    ["Exchanging cell tags", 1],
+    ["Creating recv tasks", 1],
+    ["Counting number of foreign particles", 1],
+    ["Recursively linking foreign arrays", 1],
+    ["Setting unlocks", 1],
+    ["Ranking the tasks", 1],
+    ["scheduler_reweight:", 1],
+    ["space_list_useful_top_level_cells:", 1],
+    ["space_rebuild:", 1],
+    ["engine_drift_all:", 0],
+    ["engine_unskip:", 0],
+    ["engine_collect_end_of_step:", 0],
+    ["engine_launch:", 0],
+    ["writing particle properties", 0],
+    ["engine_repartition:", 0],
+    ["engine_exchange_cells:", 1],
+    ["Dumping restart files", 0],
+    ["engine_print_stats:", 0],
+    ["engine_marktasks:", 1],
+    ["Reading initial conditions", 0],
+    ["engine_print_task_counts:", 0],
+    ["engine_drift_top_multipoles:", 0],
+    ["Communicating rebuild flag", 0],
+    ["engine_split:", 0],
+    ["space_init", 0],
+    ["engine_init", 0],
+    ["engine_repartition_trigger:", 0],
+    ["VR Collecting top-level cell info", 0],
+    ["VR Collecting particle info", 0],
+    ["VR Invokation of velociraptor", 0],
+    ["VR Copying group information back", 0]
+]
+times = np.zeros(len(labels))
+counts = np.zeros(len(labels))
+
+cols = [
+    "0.5",
+    "#332288",
+    "#88CCEE",
+    "#44AA99",
+    "#117733",
+    "#999933",
+    "#DDCC77",
+    "#CC6677",
+    "#882255",
+    "#AA4499",
+    "#661100",
+    "#6699CC",
+    "#AA4466",
+    "#4477AA",
+]
+
+total_time = 0
+lastline = ""
+
+for i in range(num_files):
+
+    filename = sys.argv[i + 1]
+    print("Analysing %s" % filename)
+
+    # Open stdout file
+    file = open(filename, "r")
+
+    # Search the different phrases
+    for line in file:
+
+        # Loop over the possible labels
+        for i in range(len(labels)):
+
+            # Extract the different blocks
+            if re.search("%s took" % labels[i][0], line):
+                counts[i] += 1.0
+                times[i] += float(
+                    re.findall(r"[+-]?((\d+\.?\d*)|(\.\d+))", line)[-1][0]
+                )
+
+        # Find the last line with meaningful output (avoid crash report, batch system stuff....)
+        if re.findall(r"\[[0-9]{4}\][ ]\[*", line) or re.findall(
+            r"^\[[0-9]*[.][0-9]+\][ ]", line
+        ):
+            lastline = line
+
+    # Total run time
+    total_time += float(re.findall(r"[+-]?(\[[0-9]\])?(\[[0-9]*[.][0-9]*\])+", lastline)[0][1][1:-1])
+
+# Convert from milliseconds to seconds
+times /= 1000.0
+
+# Total time
+total_measured_time = np.sum(times)
+print("\nTotal measured time: %.3f s" % total_measured_time)
+
+print("Total time: %f  s\n" % total_time)
+
+# Ratios
+time_ratios = times / total_time
+
+# Better looking labels
+for i in range(len(labels)):
+    labels[i][0] = labels[i][0].replace("_", " ")
+    labels[i][0] = labels[i][0].replace(":", "")
+    labels[i][0] = labels[i][0].title()
+
+times = np.array(times)
+time_ratios = np.array(time_ratios)
+
+# Sort in order of importance
+order = np.argsort(-times)
+times = times[order]
+counts = counts[order]
+time_ratios = time_ratios[order]
+labels = [labels[i] for i in order]
+
+# Keep only the important components
+important_times = [0.0]
+important_ratios = [0.0]
+important_is_rebuild = [0]
+important_labels = ["Others (all below %.1f\%%)" % (threshold * 100)]
+need_print = True
+print("Time spent in the different code sections:")
+for i in range(len(labels)):
+    if time_ratios[i] > threshold:
+        important_times.append(times[i])
+        important_ratios.append(time_ratios[i])
+        important_is_rebuild.append(labels[i][1])
+        important_labels.append(labels[i][0])
+    else:
+        if need_print:
+            print("Elements in 'Other' category (<%.1f%%):" % (threshold * 100))
+            need_print = False
+        important_times[0] += times[i]
+        important_ratios[0] += time_ratios[i]
+
+    print(" - '%-40s' (%5d calls, time: %.4fs): %.4f%%" % (labels[i][0], counts[i], times[i], time_ratios[i] * 100))
+
+# Anything unaccounted for?
+print(
+    "\nUnaccounted for: %.4f%%\n"
+    % (100 * (total_time - total_measured_time) / total_time)
+)
+
+important_ratios = np.array(important_ratios)
+important_is_rebuild = np.array(important_is_rebuild)
+
+
+figure()
+
+def func(pct):
+    return "$%4.2f\\%%$" % pct
+
+
+pie, _, _ = pie(
+    important_ratios,
+    explode=important_is_rebuild * 0.2,
+    autopct=lambda pct: func(pct),
+    textprops=dict(color="0.1", fontsize=14),
+    labeldistance=0.7,
+    pctdistance=0.85,
+    startangle=-15,
+    colors=cols,
+)
+legend(pie, important_labels, title="SWIFT operations", loc="upper left")
+
+savefig("time_pie.pdf", dpi=150)
diff --git a/examples/check_interactions.sh b/tools/check_interactions.sh
similarity index 83%
rename from examples/check_interactions.sh
rename to tools/check_interactions.sh
index 24a534b154313927ee4b2a108d3da7ea5f4d1f31..d688e69bb36b628905668183989d08204604c631 100755
--- a/examples/check_interactions.sh
+++ b/tools/check_interactions.sh
@@ -20,7 +20,7 @@ cd examples/SedovBlast_3D/
 ./getGlass.sh
 python makeIC.py
 
-../swift -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7 
+../swift --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7 
 
 mv sedov_0000.hdf5 sedov_naive.hdf5
 
@@ -29,7 +29,7 @@ cd ../EAGLE_12/
 # Link to ICs
 ln -s /gpfs/data/Swift/web-storage/ICs/EAGLE_ICs_12.hdf5 EAGLE_ICs_12.hdf5
 
-../swift -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+../swift --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv eagle_0000.hdf5 eagle_12_naive.hdf5
 
@@ -45,13 +45,13 @@ make clean; make -j 6
 
 cd examples/SedovBlast_3D/
 
-../swift -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+../swift --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv sedov_0000.hdf5 sedov_serial.hdf5
 
 cd ../EAGLE_12/
 
-../swift -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7 
+../swift --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7 
 
 mv eagle_0000.hdf5 eagle_12_serial.hdf5
 
@@ -67,7 +67,7 @@ make clean; make -j 6
 
 cd examples/SedovBlast_3D/
 
-../swift -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+../swift --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv sedov_0000.hdf5 sedov_vec.hdf5
 
@@ -98,7 +98,7 @@ fi
 
 cd ../EAGLE_12/
 
-../swift -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+../swift --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv eagle_0000.hdf5 eagle_12_vec.hdf5
 
@@ -145,13 +145,13 @@ make clean; make -j 6
 
 cd examples/SedovBlast_3D/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv sedov_0000.hdf5 sedov_naive.hdf5
 
 cd ../EAGLE_12/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv eagle_0000.hdf5 eagle_12_naive.hdf5
 
@@ -167,13 +167,13 @@ make clean; make -j 6
 
 cd examples/SedovBlast_3D/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv sedov_0000.hdf5 sedov_serial.hdf5
 
 cd ../EAGLE_12/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv eagle_0000.hdf5 eagle_12_serial.hdf5
 
@@ -189,7 +189,7 @@ make clean; make -j 6
 
 cd examples/SedovBlast_3D/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 sedov.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv sedov_0000.hdf5 sedov_vec.hdf5
 
@@ -220,7 +220,7 @@ fi
 
 cd ../EAGLE_12/
 
-mpirun -np 4 ../swift_mpi -s -t 16 -n 5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
+mpirun -np 4 ../swift_mpi --hydro --threads=16 --steps=5 eagle_12.yml -P SPH:h_tolerance:10 -P Snapshots:compression:7
 
 mv eagle_0000.hdf5 eagle_12_vec.hdf5
 
diff --git a/tools/check_ngbs.py b/tools/check_ngbs.py
new file mode 100755
index 0000000000000000000000000000000000000000..648308cb4b2c142fba3ca8e25a024113d2d082f2
--- /dev/null
+++ b/tools/check_ngbs.py
@@ -0,0 +1,418 @@
+#!/usr/bin/env python
+
+import h5py as h
+import numpy as np
+import matplotlib
+
+matplotlib.use("Agg")
+from pylab import *
+import os.path
+
+kernel_gamma = 1.825742
+kernel_gamma2 = kernel_gamma * kernel_gamma
+kernel_gamma_dim = np.power(kernel_gamma, 3)
+hydro_dimension_unit_sphere = 4.0 * np.pi / 3.0
+kernel_norm = hydro_dimension_unit_sphere * kernel_gamma_dim
+error = False
+
+inputFile1 = ""
+inputFile2 = ""
+
+# Compare the values of two floats
+def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
+    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
+
+# Check the list of density neighbours and verify that they are correct.
+def check_density_neighbours(
+    pids, ngb_ids_naive, ngb_ids_sort, mask, pos, h_naive, h_sort, num_invalid, acc
+):
+
+    for k in range(0, num_invalid):
+
+        # Filter neighbour lists for valid particle ids
+        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
+        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
+
+        # Check neighbour lists for differences
+        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
+
+        # Check for duplicate IDs
+        duplicate_check_naive = len(filter_neigh_naive) != len(set(filter_neigh_naive))
+        duplicate_check_sort = len(filter_neigh_sort) != len(set(filter_neigh_sort))
+
+        if duplicate_check_naive:
+            print("Duplicate neighbour ID found in: ", inputFile1)
+            print(filter_neigh_naive)
+            return True
+
+        if duplicate_check_sort:
+            print("Duplicate neighbour ID found in: ", inputFile2)
+            print(filter_neigh_sort)
+            return True
+
+        pid = pids[mask][k]
+
+        # Loop over discrepancies and check if they are actually neighbours
+        for pjd in id_list:
+            pi_pos = pos[np.where(pids == pid)]
+            pj_pos = pos[np.where(pids == pjd)]
+
+            hi = h_naive[np.where(pids == pid)]
+
+            dx = pi_pos[0][0] - pj_pos[0][0]
+            dy = pi_pos[0][1] - pj_pos[0][1]
+            dz = pi_pos[0][2] - pj_pos[0][2]
+
+            # Correct for BCs
+            dx = nearest(dx)
+            dy = nearest(dy)
+            dz = nearest(dz)
+
+            r2 = dx * dx + dy * dy + dz * dz
+
+            hig2 = hi * hi * kernel_gamma2
+
+            diff = abs(r2 - hig2)
+
+            print(
+                "Particle {} is missing {}, hig2: {}, r2: {}, |r2 - hig2|: {}".format(
+                    pid, pjd, hig2, r2, diff
+                )
+            )
+
+            if diff < acc * hig2:
+                print("Missing interaction due to precision issue will be ignored.")
+            else:
+                hi_2 = h_sort[np.where(pids == pid)]
+
+                # If a neighbour is missing and the particle has the same h throw
+                # an error.
+                if isclose(hi, hi_2):
+                    print(
+                        "Missing interaction found but particle has the same smoothing length (hi_1: %e, hi_2: %e)."
+                        % (hi, hi_2)
+                    )
+                    return True
+                else:
+                    print(
+                        "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."
+                        % (hi, hi_2)
+                    )
+
+    return False
+
+
+# Check the list of force neighbours and verify that they are correct.
+def check_force_neighbours(
+    pids, ngb_ids_naive, ngb_ids_sort, mask, pos, h_naive, h_sort, num_invalid, acc
+):
+
+    error_val = False
+
+    for k in range(0, num_invalid):
+
+        # Filter neighbour lists for valid particle ids
+        filter_neigh_naive = [i for i in ngb_ids_naive[mask][k] if i > -1]
+        filter_neigh_sort = [i for i in ngb_ids_sort[mask][k] if i > -1]
+
+        # Check neighbour lists for differences
+        id_list = set(filter_neigh_naive).symmetric_difference(set(filter_neigh_sort))
+
+        pid = pids[mask][k]
+
+        # Loop over discrepancies and check if they are actually neighbours
+        for pjd in id_list:
+            pi_pos = pos[np.where(pids == pid)]
+            pj_pos = pos[np.where(pids == pjd)]
+
+            hi = h_naive[np.where(pids == pid)]
+            hj = h_naive[np.where(pids == pjd)]
+
+            dx = pi_pos[0][0] - pj_pos[0][0]
+            dy = pi_pos[0][1] - pj_pos[0][1]
+            dz = pi_pos[0][2] - pj_pos[0][2]
+
+            # Correct for BCs
+            dx = nearest(dx)
+            dy = nearest(dy)
+            dz = nearest(dz)
+
+            r2 = dx * dx + dy * dy + dz * dz
+
+            hig2 = hi * hi * kernel_gamma2
+            hjg2 = hj * hj * kernel_gamma2
+
+            diff = abs(r2 - max(hig2, hjg2))
+
+            print(
+                "Particle {} is missing {}, hig2: {}, hjg2: {}, r2: {}, |r2 - max(hig2,hjg2)|: {}".format(
+                    pid, pjd, hig2, hjg2, r2, diff
+                )
+            )
+
+            if diff < acc * max(hig2, hjg2):
+                print("Missing interaction due to precision issue will be ignored.")
+            else:
+                hi_2 = h_sort[np.where(pids == pid)]
+                if isclose(hi, hi_2):
+                    print(
+                        "Missing interaction due to the same smoothing lengths will not be ignored (hi_1: %e, hi_2: %e)."
+                        % (hi, hi_2)
+                    )
+                    error_val = True
+                else:
+                    print(
+                        "Missing interaction due to different smoothing lengths will be ignored (hi_1: %e, hi_2: %e)."
+                        % (hi, hi_2)
+                    )
+
+    return error_val
+
+
+def nearest(dx):
+    if dx > 0.5 * box_size:
+        return dx - box_size
+    elif dx < -0.5 * box_size:
+        return dx + box_size
+    else:
+        return dx
+
+
+# Parse command line arguments
+if len(sys.argv) < 3:
+    print("Error: pass input files as arguments")
+    sys.exit()
+else:
+    inputFile1 = sys.argv[1]
+    inputFile2 = sys.argv[2]
+    if os.path.exists(inputFile1) != 1:
+        print("\n{} does not exist!\n".format(inputFile1))
+        sys.exit()
+    if os.path.exists(inputFile2) != 1:
+        print("\n{} does not exist!\n".format(inputFile2))
+        sys.exit()
+
+# Open input files
+file_naive = h.File(inputFile1, "r")
+file_sort = h.File(inputFile2, "r")
+
+box_size = file_naive["/Header"].attrs["BoxSize"][0]
+
+# Read input file fields
+ids_naive = file_naive["/PartType0/ParticleIDs"][:]
+ids_sort = file_sort["/PartType0/ParticleIDs"][:]
+
+h_naive = file_naive["/PartType0/SmoothingLength"][:]
+h_sort = file_sort["/PartType0/SmoothingLength"][:]
+
+pos_naive = file_naive["/PartType0/Coordinates"][:, :]
+# pos_sort = file_sort["/PartType0/Coordinates"][:,:]
+
+num_density_naive = file_naive["/PartType0/Num_ngb_density"][:]
+num_density_sort = file_sort["/PartType0/Num_ngb_density"][:]
+
+num_force_naive = file_naive["/PartType0/Num_ngb_force"][:]
+num_force_sort = file_sort["/PartType0/Num_ngb_force"][:]
+
+neighbour_ids_density_naive = file_naive["/PartType0/Ids_ngb_density"][:]
+neighbour_ids_density_sort = file_sort["/PartType0/Ids_ngb_density"][:]
+
+neighbour_ids_force_naive = file_naive["/PartType0/Ids_ngb_force"][:]
+neighbour_ids_force_sort = file_sort["/PartType0/Ids_ngb_force"][:]
+
+
+# wcount_naive = file_naive["/PartType0/Wcount"][:]
+# wcount_sort = file_sort["/PartType0/Wcount"][:]
+#
+# wcount_naive = wcount_naive * np.power(h_naive,3) * kernel_norm
+# wcount_sort = wcount_sort * np.power(h_sort,3) * kernel_norm
+
+# Cross check
+max_density_ngbs_naive = np.max(num_density_naive)
+max_density_ngbs_sort = np.max(num_density_sort)
+max_force_ngbs_naive = np.max(num_force_naive)
+max_force_ngbs_sort = np.max(num_force_sort)
+
+print("                   Min     Mean     Max ")
+print("                   ---------------------")
+print(
+    "Ngbs density naiv: ",
+    np.min(num_density_naive),
+    np.mean(num_density_naive),
+    max_density_ngbs_naive,
+)
+print(
+    "Ngbs density sort: ",
+    np.min(num_density_sort),
+    np.mean(num_density_sort),
+    max_density_ngbs_sort,
+)
+print(
+    "Ngbs force naiv:   ",
+    np.min(num_force_naive),
+    np.mean(num_force_naive),
+    max_force_ngbs_naive,
+)
+print(
+    "Ngbs force sort:   ",
+    np.min(num_force_sort),
+    np.mean(num_force_sort),
+    max_force_ngbs_sort,
+)
+# print "Wcount naiv:   ", np.min(wcount_naive), np.mean(wcount_naive), np.max(wcount_naive)
+# print "Wcount sort:   ", np.min(wcount_sort), np.mean(wcount_sort), np.max(wcount_sort)
+
+# Sort
+index_naive = np.argsort(ids_naive)
+index_sort = np.argsort(ids_sort)
+
+num_density_naive = num_density_naive[index_naive]
+num_density_sort = num_density_sort[index_sort]
+num_force_naive = num_force_naive[index_naive]
+num_force_sort = num_force_sort[index_sort]
+ids_naive = ids_naive[index_naive]
+ids_sort = ids_sort[index_sort]
+neighbour_ids_density_naive = neighbour_ids_density_naive[index_naive]
+neighbour_ids_density_sort = neighbour_ids_density_sort[index_sort]
+neighbour_ids_force_naive = neighbour_ids_force_naive[index_naive]
+neighbour_ids_force_sort = neighbour_ids_force_sort[index_sort]
+# wcount_naive = wcount_naive[index_naive]
+# wcount_sort = wcount_sort[index_sort]
+h_naive = h_naive[index_naive]
+h_sort = h_sort[index_sort]
+pos_naive = pos_naive[index_naive]
+# pos_sort = pos_sort[index_sort]
+
+neighbour_length_naive = len(neighbour_ids_density_naive[0])
+neighbour_length_sort = len(neighbour_ids_density_sort[0])
+
+# Check that input files are logging the same number of neighbours
+if neighbour_length_naive != neighbour_length_sort:
+    print("Input files have logged different numbers of neighbour lengths!")
+    print("{} has logged: {} neighbours".format(inputFile1, neighbour_length_naive))
+    print("{} has logged: {} neighbours".format(inputFile2, neighbour_length_sort))
+    exit(1)
+
+if (
+    max_density_ngbs_naive > neighbour_length_naive
+    or max_force_ngbs_naive > neighbour_length_naive
+    or max_density_ngbs_sort > neighbour_length_sort
+    or max_force_ngbs_sort > neighbour_length_sort
+):
+    print("The number of neighbours has exceeded the number of neighbours logged.")
+    print("Modify NUM_OF_NEIGHBOURS in hydro_part.h to log more neighbours.")
+    print(
+        "The highest neighbour count is: ",
+        max(
+            max_density_ngbs_naive,
+            max_force_ngbs_naive,
+            max_density_ngbs_sort,
+            max_force_ngbs_sort,
+        ),
+    )
+    exit(1)
+
+# First check
+print("\n                         Min    Max")
+print("                         ----------")
+print(
+    "Differences for density:  ",
+    min(num_density_naive - num_density_sort),
+    max(num_density_naive - num_density_sort),
+)
+print(
+    "Differences for force:    ",
+    min(num_force_naive - num_force_sort),
+    max(num_force_naive - num_force_sort),
+)
+
+# Get the IDs that are different
+mask_density = num_density_naive != num_density_sort
+mask_force = num_force_naive != num_force_sort
+num_invalid_density = np.sum(mask_density)
+num_invalid_force = np.sum(mask_force)
+
+print("\nNum non-zero density: ", num_invalid_density)
+print("Num non-zero force:   ", num_invalid_force)
+
+print("\nParticle IDs with incorrect densities")
+print("----------------------------------------")
+print(ids_naive[mask_density])
+
+# Check density neighbour lists
+error += check_density_neighbours(
+    ids_naive,
+    neighbour_ids_density_naive,
+    neighbour_ids_density_sort,
+    mask_density,
+    pos_naive,
+    h_naive,
+    h_sort,
+    num_invalid_density,
+    2e-6,
+)
+
+print("Num of density interactions", inputFile1)
+print(num_density_naive[mask_density])
+
+print("Num of density interactions", inputFile2)
+print(num_density_sort[mask_density])
+
+print("\nParticle IDs with incorrect forces")
+print("------------------------------------")
+print(ids_naive[mask_force])
+
+# Check force neighbour lists
+error += check_force_neighbours(
+    ids_naive,
+    neighbour_ids_force_naive,
+    neighbour_ids_force_sort,
+    mask_force,
+    pos_naive,
+    h_naive,
+    h_sort,
+    num_invalid_force,
+    2e-6,
+)
+
+print("Num of force interactions", inputFile1)
+print(num_force_naive[mask_force])
+
+# print "Smoothing lengths", inputFile1
+# print h_naive[mask_force]
+
+print("Num of force interactions", inputFile2)
+print(num_force_sort[mask_force])
+
+# print "Smoothing lengths", inputFile2
+# print h_sort[mask_force]
+
+# Statistics of h difference
+h_relative = (h_naive - h_sort) / h_naive
+print(
+    "h statistics: {} {} (Min, 1st Percentile)".format(
+        np.min(h_relative), np.percentile(h_relative, 1)
+    )
+)
+print(
+    "h statistics: {} {} (Mean, Median)".format(
+        np.mean(h_relative), np.median(h_relative)
+    )
+)
+print(
+    "h statistics: {} {} (Max, 99th Percentile)".format(
+        np.max(h_relative), np.percentile(h_relative, 99)
+    )
+)
+
+if error:
+    print("\n------------------")
+    print("Differences found.")
+    print("------------------")
+    exit(1)
+else:
+    print("\n---------------------")
+    print("No differences found.")
+    print("---------------------")
+    exit(0)
diff --git a/tools/combine_ics.py b/tools/combine_ics.py
new file mode 100755
index 0000000000000000000000000000000000000000..64f255a61934bc3667fdb5934f74a206013e4872
--- /dev/null
+++ b/tools/combine_ics.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+"""
+Usage:
+    combine_ics.py input_file.0.hdf5 merged_file.hdf5 gzip_level
+
+This file combines Gadget-2 type 2 (i.e. hdf5) initial condition files
+into a single file that can be digested by SWIFT. 
+This has mainly been tested for DM-only (parttype1) files but also works
+smoothly for ICs including gas. The special case of a mass-table for
+the DM particles is handled. No unit conversions are applied nor are
+any scale-factors or h-factors changed.
+The script applies some compression and checksum filters to the output
+to save disk space. 
+The last argument `gzip_level` is used to specify the level of compression
+to apply to all the fields in the file. Use 0 to cancel all compression.
+The default value is `4`.
+
+This file is part of SWIFT.
+Copyright (C) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
+
+All Rights Reserved.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import sys
+import h5py as h5
+import numpy as np
+
+# Store the compression level
+gzip_level = 4
+if len(sys.argv) > 3:
+    gzip_level = sys.argv[3]
+
+# First, we need to collect some information from the master file
+main_file_name = str(sys.argv[1])[:-7]
+print("Merging snapshots files with name", main_file_name)
+master_file_name = main_file_name + ".0.hdf5"
+print("Reading master information from", master_file_name)
+master_file = h5.File(master_file_name, "r")
+grp_header = master_file["/Header"]
+
+num_files = grp_header.attrs["NumFilesPerSnapshot"]
+tot_num_parts = grp_header.attrs["NumPart_Total"]
+tot_num_parts_high_word = grp_header.attrs["NumPart_Total_HighWord"]
+entropy_flag = grp_header.attrs["Flag_Entropy_ICs"]
+box_size = grp_header.attrs["BoxSize"]
+time = grp_header.attrs["Time"]
+
+# Combine the low- and high-words
+tot_num_parts = tot_num_parts.astype(np.int64)
+for i in range(6):
+    tot_num_parts[i] += (np.int64(tot_num_parts_high_word[i]) << 32)
+
+# Some basic information
+print("Reading", tot_num_parts, "particles from", num_files, "files.")
+
+# Check whether there is a mass table
+DM_mass = 0.0
+mtable = grp_header.attrs.get("MassTable")
+if mtable is not None:
+    DM_mass = grp_header.attrs["MassTable"][1]
+if DM_mass != 0.0:
+    print("DM mass set to", DM_mass, "from the header mass table.")
+else:
+    print("Reading DM mass from the particles.")
+
+
+# Create the empty file
+output_file_name = sys.argv[2]
+output_file = h5.File(output_file_name, "w-")
+
+
+# Header
+grp = output_file.create_group("/Header")
+grp.attrs["NumFilesPerSnapshot"] = 1
+grp.attrs["NumPart_Total"] = tot_num_parts
+grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
+grp.attrs["NumPart_ThisFile"] = tot_num_parts
+grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+grp.attrs["BoxSize"] = box_size
+grp.attrs["Flag_Entropy_ICs"] = entropy_flag
+grp.attrs["Time"] = time
+
+# Create the particle groups
+if tot_num_parts[0] > 0:
+    grp0 = output_file.create_group("/PartType0")
+if tot_num_parts[1] > 0:
+    grp1 = output_file.create_group("/PartType1")
+if tot_num_parts[4] > 0:
+    grp4 = output_file.create_group("/PartType4")
+if tot_num_parts[5] > 0:
+    grp5 = output_file.create_group("/PartType5")
+
+
+# Helper function to create the datasets we need
+def create_set(grp, name, size, dim, dtype):
+    if dim == 1:
+        grp.create_dataset(
+            name,
+            (size,),
+            dtype=dtype,
+            chunks=True,
+            compression="gzip",
+            compression_opts=gzip_level,
+            shuffle=True,
+            fletcher32=True,
+            maxshape=(size,),
+        )
+    else:
+        grp.create_dataset(
+            name,
+            (size, dim),
+            dtype=dtype,
+            chunks=True,
+            compression="gzip",
+            compression_opts=gzip_level,
+            shuffle=True,
+            fletcher32=True,
+            maxshape=(size, dim),
+        )
+
+
+# Create the required datasets
+if tot_num_parts[0] > 0:
+    create_set(grp0, "Coordinates", tot_num_parts[0], 3, "d")
+    create_set(grp0, "Velocities", tot_num_parts[0], 3, "f")
+    create_set(grp0, "Masses", tot_num_parts[0], 1, "f")
+    create_set(grp0, "ParticleIDs", tot_num_parts[0], 1, "l")
+    create_set(grp0, "InternalEnergy", tot_num_parts[0], 1, "f")
+    create_set(grp0, "SmoothingLength", tot_num_parts[0], 1, "f")
+
+if tot_num_parts[1] > 0:
+    create_set(grp1, "Coordinates", tot_num_parts[1], 3, "d")
+    create_set(grp1, "Velocities", tot_num_parts[1], 3, "f")
+    create_set(grp1, "Masses", tot_num_parts[1], 1, "f")
+    create_set(grp1, "ParticleIDs", tot_num_parts[1], 1, "l")
+
+if tot_num_parts[4] > 0:
+    create_set(grp4, "Coordinates", tot_num_parts[4], 3, "d")
+    create_set(grp4, "Velocities", tot_num_parts[4], 3, "f")
+    create_set(grp4, "Masses", tot_num_parts[4], 1, "f")
+    create_set(grp4, "ParticleIDs", tot_num_parts[4], 1, "l")
+
+if tot_num_parts[5] > 0:
+    create_set(grp5, "Coordinates", tot_num_parts[5], 3, "d")
+    create_set(grp5, "Velocities", tot_num_parts[5], 3, "f")
+    create_set(grp5, "Masses", tot_num_parts[5], 1, "f")
+    create_set(grp5, "ParticleIDs", tot_num_parts[5], 1, "l")
+
+# Heavy-lifting ahead. Leave a last message.
+print("Datasets created in output file")
+
+
+# Special case of the non-zero mass table
+if DM_mass != 0.0:
+    masses = np.ones(tot_num_parts[1], dtype=np.float) * DM_mass
+    grp1["Masses"][:] = masses
+
+
+# Cumulative number of particles read/written
+cumul_parts = [0, 0, 0, 0, 0, 0]
+
+# Loop over all the files that are part of the snapshots
+for f in range(num_files):
+
+    file_name = main_file_name + "." + str(f) + ".hdf5"
+    file = h5.File(file_name, "r")
+    file_header = file["/Header"]
+    num_parts = file_header.attrs["NumPart_ThisFile"]
+
+    print(
+        "Copying data from file",
+        f,
+        "/",
+        num_files,
+        ": num_parts = [",
+        num_parts[0],
+        num_parts[1],
+        num_parts[4],
+        num_parts[5],
+        "]",
+    )
+    sys.stdout.flush()
+
+    # Helper function to copy data
+    def copy_grp(name_new, name_old, ptype):
+        full_name_new = "/PartType" + str(ptype) + "/" + name_new
+        full_name_old = "/PartType" + str(ptype) + "/" + name_old
+        output_file[full_name_new][
+            cumul_parts[ptype] : cumul_parts[ptype] + num_parts[ptype]
+        ] = file[full_name_old]
+
+    def copy_grp_same_name(name, ptype):
+        copy_grp(name, name, ptype)
+
+    if num_parts[0] > 0:
+        copy_grp_same_name("Coordinates", 0)
+        copy_grp_same_name("Velocities", 0)
+        copy_grp_same_name("Masses", 0)
+        copy_grp_same_name("ParticleIDs", 0)
+        copy_grp_same_name("InternalEnergy", 0)
+        copy_grp_same_name("SmoothingLength", 0)
+
+    if num_parts[1] > 0:
+        copy_grp_same_name("Coordinates", 1)
+        copy_grp_same_name("Velocities", 1)
+        copy_grp_same_name("ParticleIDs", 1)
+        if DM_mass == 0.0:  # Do not overwrite values if there was a mass table
+            copy_grp_same_name("Masses", 1)
+
+    if num_parts[4] > 0:
+        copy_grp_same_name("Coordinates", 4)
+        copy_grp_same_name("Velocities", 4)
+        copy_grp_same_name("Masses", 4)
+        copy_grp_same_name("ParticleIDs", 4)
+
+    if num_parts[5] > 0:
+        copy_grp_same_name("Coordinates", 5)
+        copy_grp_same_name("Velocities", 5)
+        copy_grp_same_name("Masses", 5)
+        copy_grp_same_name("ParticleIDs", 5)
+
+    cumul_parts[0] += num_parts[0]
+    cumul_parts[1] += num_parts[1]
+    cumul_parts[4] += num_parts[4]
+    cumul_parts[5] += num_parts[5]
+    file.close()
+
+print("All done! SWIFT is waiting.")
diff --git a/tools/plot_gravity_checks.py b/tools/plot_gravity_checks.py
new file mode 100755
index 0000000000000000000000000000000000000000..cef81b86e9663bc7c9df2c9affaeb258501f57e6
--- /dev/null
+++ b/tools/plot_gravity_checks.py
@@ -0,0 +1,456 @@
+#!/usr/bin/env python
+
+import sys
+import glob
+import re
+import numpy as np
+import matplotlib.pyplot as plt
+
+params = {
+    "axes.labelsize": 14,
+    "axes.titlesize": 18,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 14,
+    "ytick.labelsize": 14,
+    "text.usetex": True,
+    "figure.figsize": (12, 10),
+    "figure.subplot.left": 0.06,
+    "figure.subplot.right": 0.99,
+    "figure.subplot.bottom": 0.06,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.14,
+    "figure.subplot.hspace": 0.14,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+    "text.latex.unicode": True,
+}
+plt.rcParams.update(params)
+plt.rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+min_error = 1e-7
+max_error = 3e-1
+num_bins = 64
+
+# Construct the bins
+bin_edges = np.linspace(np.log10(min_error), np.log10(max_error), num_bins + 1)
+bin_size = (np.log10(max_error) - np.log10(min_error)) / num_bins
+bins = 0.5 * (bin_edges[1:] + bin_edges[:-1])
+bin_edges = 10 ** bin_edges
+bins = 10 ** bins
+
+# Colours
+cols = ["#332288", "#88CCEE", "#117733", "#DDCC77", "#CC6677"]
+
+# Time-step to plot
+step = int(sys.argv[1])
+periodic = int(sys.argv[2])
+
+# Find the files for the different expansion orders
+order_list = glob.glob("gravity_checks_swift_step%.4d_order*.dat" % step)
+num_order = len(order_list)
+
+# Get the multipole orders
+order = np.zeros(num_order)
+for i in range(num_order):
+    order[i] = int(order_list[i][35])
+order = sorted(order)
+order_list = sorted(order_list)
+
+# Read the exact accelerations first
+if periodic:
+    data = np.loadtxt("gravity_checks_exact_periodic_step%.4d.dat" % step)
+else:
+    data = np.loadtxt("gravity_checks_exact_step%.4d.dat" % step)
+exact_ids = data[:, 0]
+exact_pos = data[:, 1:4]
+exact_a = data[:, 4:7]
+exact_pot = data[:, 7]
+# Sort stuff
+sort_index = np.argsort(exact_ids)
+exact_ids = exact_ids[sort_index]
+exact_pos = exact_pos[sort_index, :]
+exact_a = exact_a[sort_index, :]
+exact_pot = exact_pot[sort_index]
+exact_a_norm = np.sqrt(exact_a[:, 0] ** 2 + exact_a[:, 1] ** 2 + exact_a[:, 2] ** 2)
+
+print("Number of particles tested:", np.size(exact_ids))
+
+# Start the plot
+plt.figure()
+
+count = 0
+
+# Get the Gadget-2 data if existing
+if periodic:
+    gadget2_file_list = glob.glob("forcetest_gadget2_periodic.txt")
+else:
+    gadget2_file_list = glob.glob("forcetest_gadget2.txt")
+if len(gadget2_file_list) != 0:
+
+    gadget2_data = np.loadtxt(gadget2_file_list[0])
+    gadget2_ids = gadget2_data[:, 0]
+    gadget2_pos = gadget2_data[:, 1:4]
+    gadget2_a_exact = gadget2_data[:, 4:7]
+    gadget2_a_grav = gadget2_data[:, 7:10]
+
+    # Sort stuff
+    sort_index = np.argsort(gadget2_ids)
+    gadget2_ids = gadget2_ids[sort_index]
+    gadget2_pos = gadget2_pos[sort_index, :]
+    gadget2_a_exact = gadget2_a_exact[sort_index, :]
+    gadget2_exact_a_norm = np.sqrt(
+        gadget2_a_exact[:, 0] ** 2
+        + gadget2_a_exact[:, 1] ** 2
+        + gadget2_a_exact[:, 2] ** 2
+    )
+    gadget2_a_grav = gadget2_a_grav[sort_index, :]
+
+    # Cross-checks
+    if not np.array_equal(exact_ids, gadget2_ids):
+        print("Comparing different IDs !")
+
+    if np.max(np.abs(exact_pos - gadget2_pos) / np.abs(gadget2_pos)) > 1e-6:
+        print("Comparing different positions ! max difference:")
+        index = np.argmax(
+            exact_pos[:, 0] ** 2
+            + exact_pos[:, 1] ** 2
+            + exact_pos[:, 2] ** 2
+            - gadget2_pos[:, 0] ** 2
+            - gadget2_pos[:, 1] ** 2
+            - gadget2_pos[:, 2] ** 2
+        )
+        print(
+            "Gadget2 (id=%d):" % gadget2_ids[index],
+            gadget2_pos[index, :],
+            "exact (id=%d):" % exact_ids[index],
+            exact_pos[index, :],
+            "\n",
+        )
+
+    diff = np.abs(exact_a_norm - gadget2_exact_a_norm) / np.abs(gadget2_exact_a_norm)
+    max_diff = np.max(diff)
+    if max_diff > 2e-6:
+        print("Comparing different exact accelerations !")
+        print(
+            "Median=",
+            np.median(diff),
+            "Mean=",
+            np.mean(diff),
+            "99%=",
+            np.percentile(diff, 99),
+        )
+        print("max difference ( relative diff =", max_diff, "):")
+        # index = np.argmax(exact_a[:,0]**2 + exact_a[:,1]**2 + exact_a[:,2]**2 - gadget2_a_exact[:,0]**2 - gadget2_a_exact[:,1]**2 - gadget2_a_exact[:,2]**2)
+        index = np.argmax(diff)
+        print(
+            "a_exact --- Gadget2:",
+            gadget2_a_exact[index, :],
+            "exact:",
+            exact_a[index, :],
+        )
+        print(
+            "pos ---     Gadget2: (id=%d):" % gadget2_ids[index],
+            gadget2_pos[index, :],
+            "exact (id=%d):" % gadget2_ids[index],
+            gadget2_pos[index, :],
+            "\n",
+        )
+
+    # Compute the error norm
+    diff = gadget2_a_exact - gadget2_a_grav
+
+    norm_diff = np.sqrt(diff[:, 0] ** 2 + diff[:, 1] ** 2 + diff[:, 2] ** 2)
+    norm_a = np.sqrt(
+        gadget2_a_exact[:, 0] ** 2
+        + gadget2_a_exact[:, 1] ** 2
+        + gadget2_a_exact[:, 2] ** 2
+    )
+
+    norm_error = norm_diff / norm_a
+    error_x = abs(diff[:, 0]) / norm_a
+    error_y = abs(diff[:, 1]) / norm_a
+    error_z = abs(diff[:, 2]) / norm_a
+
+    # Bin the error
+    norm_error_hist, _ = np.histogram(norm_error, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_x_hist, _ = np.histogram(error_x, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_y_hist, _ = np.histogram(error_y, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_z_hist, _ = np.histogram(error_z, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+
+    norm_median = np.median(norm_error)
+    median_x = np.median(error_x)
+    median_y = np.median(error_y)
+    median_z = np.median(error_z)
+
+    norm_per99 = np.percentile(norm_error, 99)
+    per99_x = np.percentile(error_x, 99)
+    per99_y = np.percentile(error_y, 99)
+    per99_z = np.percentile(error_z, 99)
+
+    norm_max = np.max(norm_error)
+    max_x = np.max(error_x)
+    max_y = np.max(error_y)
+    max_z = np.max(error_z)
+
+    print("Gadget-2 ---- ")
+    print("Norm: median= %f 99%%= %f max= %f" % (norm_median, norm_per99, norm_max))
+    print("X   : median= %f 99%%= %f max= %f" % (median_x, per99_x, max_x))
+    print("Y   : median= %f 99%%= %f max= %f" % (median_y, per99_y, max_y))
+    print("Z   : median= %f 99%%= %f max= %f" % (median_z, per99_z, max_z))
+    print("")
+
+    plt.subplot(231)
+    plt.text(
+        min_error * 1.5,
+        1.55,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (norm_median, norm_per99),
+        ha="left",
+        va="top",
+        alpha=0.8,
+    )
+    plt.semilogx(bins, norm_error_hist, "k--", label="Gadget-2", alpha=0.8)
+    plt.subplot(232)
+    plt.semilogx(bins, error_x_hist, "k--", label="Gadget-2", alpha=0.8)
+    plt.text(
+        min_error * 1.5,
+        1.55,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_x, per99_x),
+        ha="left",
+        va="top",
+        alpha=0.8,
+    )
+    plt.subplot(233)
+    plt.semilogx(bins, error_y_hist, "k--", label="Gadget-2", alpha=0.8)
+    plt.text(
+        min_error * 1.5,
+        1.55,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_y, per99_y),
+        ha="left",
+        va="top",
+        alpha=0.8,
+    )
+    plt.subplot(234)
+    plt.semilogx(bins, error_z_hist, "k--", label="Gadget-2", alpha=0.8)
+    plt.text(
+        min_error * 1.5,
+        1.55,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_z, per99_z),
+        ha="left",
+        va="top",
+        alpha=0.8,
+    )
+
+    count += 1
+
+
+# Plot the different histograms
+for i in range(num_order):
+    data = np.loadtxt(order_list[i])
+    ids = data[:, 0]
+    pos = data[:, 1:4]
+    a_grav = data[:, 4:7]
+    pot = data[:, 7]
+
+    # Sort stuff
+    sort_index = np.argsort(ids)
+    ids = ids[sort_index]
+    pos = pos[sort_index, :]
+    a_grav = a_grav[sort_index, :]
+    pot = pot[sort_index]
+
+    # Cross-checks
+    if not np.array_equal(exact_ids, ids):
+        print("Comparing different IDs !")
+
+    if np.max(np.abs(exact_pos - pos) / np.abs(pos)) > 1e-6:
+        print("Comparing different positions ! max difference:")
+        index = np.argmax(
+            exact_pos[:, 0] ** 2
+            + exact_pos[:, 1] ** 2
+            + exact_pos[:, 2] ** 2
+            - pos[:, 0] ** 2
+            - pos[:, 1] ** 2
+            - pos[:, 2] ** 2
+        )
+        print(
+            "SWIFT (id=%d):" % ids[index],
+            pos[index, :],
+            "exact (id=%d):" % exact_ids[index],
+            exact_pos[index, :],
+            "\n",
+        )
+
+    # Compute the error norm
+    diff = exact_a - a_grav
+    diff_pot = exact_pot - pot
+
+    # Correct for different normalization of potential
+    print("Difference in normalization of potential:", np.mean(diff_pot), end=" ")
+    print(
+        "std_dev=",
+        np.std(diff_pot),
+        "99-percentile:",
+        np.percentile(diff_pot, 99) - np.median(diff_pot),
+        "1-percentile:",
+        np.median(diff_pot) - np.percentile(diff_pot, 1),
+    )
+
+    exact_pot -= np.mean(diff_pot)
+    diff_pot = exact_pot - pot
+
+    norm_diff = np.sqrt(diff[:, 0] ** 2 + diff[:, 1] ** 2 + diff[:, 2] ** 2)
+
+    norm_error = norm_diff / exact_a_norm
+    error_x = abs(diff[:, 0]) / exact_a_norm
+    error_y = abs(diff[:, 1]) / exact_a_norm
+    error_z = abs(diff[:, 2]) / exact_a_norm
+    error_pot = abs(diff_pot) / abs(exact_pot)
+
+    # Bin the error
+    norm_error_hist, _ = np.histogram(norm_error, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_x_hist, _ = np.histogram(error_x, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_y_hist, _ = np.histogram(error_y, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_z_hist, _ = np.histogram(error_z, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+    error_pot_hist, _ = np.histogram(error_pot, bins=bin_edges, density=False) / (
+        np.size(norm_error) * bin_size
+    )
+
+    norm_median = np.median(norm_error)
+    median_x = np.median(error_x)
+    median_y = np.median(error_y)
+    median_z = np.median(error_z)
+    median_pot = np.median(error_pot)
+
+    norm_per99 = np.percentile(norm_error, 99)
+    per99_x = np.percentile(error_x, 99)
+    per99_y = np.percentile(error_y, 99)
+    per99_z = np.percentile(error_z, 99)
+    per99_pot = np.percentile(error_pot, 99)
+
+    norm_max = np.max(norm_error)
+    max_x = np.max(error_x)
+    max_y = np.max(error_y)
+    max_z = np.max(error_z)
+    max_pot = np.max(error_pot)
+
+    print("Order %d ---- " % order[i])
+    print("Norm: median= %f 99%%= %f max= %f" % (norm_median, norm_per99, norm_max))
+    print("X   : median= %f 99%%= %f max= %f" % (median_x, per99_x, max_x))
+    print("Y   : median= %f 99%%= %f max= %f" % (median_y, per99_y, max_y))
+    print("Z   : median= %f 99%%= %f max= %f" % (median_z, per99_z, max_z))
+    print("Pot : median= %f 99%%= %f max= %f" % (median_pot, per99_pot, max_pot))
+    print("")
+
+    plt.subplot(231)
+    plt.semilogx(
+        bins, error_x_hist, color=cols[i], label="SWIFT m-poles order %d" % order[i]
+    )
+    plt.text(
+        min_error * 1.5,
+        1.5 - count / 10.0,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_x, per99_x),
+        ha="left",
+        va="top",
+        color=cols[i],
+    )
+    plt.subplot(232)
+    plt.semilogx(
+        bins, error_y_hist, color=cols[i], label="SWIFT m-poles order %d" % order[i]
+    )
+    plt.text(
+        min_error * 1.5,
+        1.5 - count / 10.0,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_y, per99_y),
+        ha="left",
+        va="top",
+        color=cols[i],
+    )
+    plt.subplot(233)
+    plt.semilogx(
+        bins, error_z_hist, color=cols[i], label="SWIFT m-poles order %d" % order[i]
+    )
+    plt.text(
+        min_error * 1.5,
+        1.5 - count / 10.0,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_z, per99_z),
+        ha="left",
+        va="top",
+        color=cols[i],
+    )
+    plt.subplot(234)
+    plt.semilogx(
+        bins, norm_error_hist, color=cols[i], label="SWIFT m-poles order %d" % order[i]
+    )
+    plt.text(
+        min_error * 1.5,
+        1.5 - count / 10.0,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (norm_median, norm_per99),
+        ha="left",
+        va="top",
+        color=cols[i],
+    )
+    plt.subplot(235)
+    plt.semilogx(
+        bins, error_pot_hist, color=cols[i], label="SWIFT m-poles order %d" % order[i]
+    )
+    plt.text(
+        min_error * 1.5,
+        1.5 - count / 10.0,
+        "$50\\%%\\rightarrow%.4f~~ 99\\%%\\rightarrow%.4f$" % (median_pot, per99_pot),
+        ha="left",
+        va="top",
+        color=cols[i],
+    )
+
+    count += 1
+
+plt.subplot(231)
+plt.xlabel("$\delta a_x/|\overrightarrow{a}_{exact}|$")
+# plt.ylabel("Density")
+plt.xlim(min_error, max_error)
+plt.ylim(0, 1.75)
+# plt.legend(loc="center left")
+plt.subplot(232)
+plt.xlabel("$\delta a_y/|\overrightarrow{a}_{exact}|$")
+# plt.ylabel("Density")
+plt.xlim(min_error, max_error)
+plt.ylim(0, 1.75)
+# plt.legend(loc="center left")
+plt.subplot(233)
+plt.xlabel("$\delta a_z/|\overrightarrow{a}_{exact}|$")
+# plt.ylabel("Density")
+plt.xlim(min_error, max_error)
+plt.ylim(0, 1.75)
+plt.subplot(234)
+plt.xlabel("$|\delta \overrightarrow{a}|/|\overrightarrow{a}_{exact}|$")
+# plt.ylabel("Density")
+plt.xlim(min_error, max_error)
+plt.ylim(0, 2.5)
+plt.legend(loc="upper left")
+plt.subplot(235)
+plt.xlabel("$\delta \phi/\phi_{exact}$")
+# plt.ylabel("Density")
+plt.xlim(min_error, max_error)
+plt.ylim(0, 1.75)
+# plt.legend(loc="center left")
+
+
+plt.savefig("gravity_checks_step%.4d.png" % step, dpi=200)
+plt.savefig("gravity_checks_step%.4d.pdf" % step, dpi=200)
diff --git a/tools/plot_scaling_results.py b/tools/plot_scaling_results.py
new file mode 100755
index 0000000000000000000000000000000000000000..2c29d93f88cbe4bde83f3473e9f738a526480c1c
--- /dev/null
+++ b/tools/plot_scaling_results.py
@@ -0,0 +1,365 @@
+#!/usr/bin/env python
+#
+# Usage:
+#  python plot_scaling_results.py input-file1-ext input-file2-ext ...
+#
+# Description:
+# Plots speed up, parallel efficiency and time to solution given a "timesteps" output file generated by SWIFT.
+#
+# Example:
+# python plot_scaling_results.py _threads_cosma_stdout.txt _threads_knl_stdout.txt
+#
+# The working directory should contain files 1_threads_cosma_stdout.txt - 64_threads_cosma_stdout.txt and 1_threads_knl_stdout.txt - 64_threads_knl_stdout.txt, i.e. wall clock time for each run using a given number of threads
+
+import sys
+import glob
+import re
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.stats
+import ntpath
+
+params = {
+    "axes.labelsize": 14,
+    "axes.titlesize": 18,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 14,
+    "ytick.labelsize": 14,
+    "text.usetex": True,
+    "figure.subplot.left": 0.055,
+    "figure.subplot.right": 0.98,
+    "figure.subplot.bottom": 0.05,
+    "figure.subplot.top": 0.95,
+    "figure.subplot.wspace": 0.14,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+    "text.latex.unicode": True,
+}
+plt.rcParams.update(params)
+plt.rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+version = []
+branch = []
+revision = []
+hydro_scheme = []
+hydro_kernel = []
+hydro_neighbours = []
+hydro_eta = []
+threadList = []
+hexcols = [
+    "#332288",
+    "#88CCEE",
+    "#44AA99",
+    "#117733",
+    "#999933",
+    "#DDCC77",
+    "#CC6677",
+    "#882255",
+    "#AA4499",
+    "#661100",
+    "#6699CC",
+    "#AA4466",
+    "#4477AA",
+]
+linestyle = (
+    hexcols[0],
+    hexcols[1],
+    hexcols[3],
+    hexcols[5],
+    hexcols[6],
+    hexcols[8],
+    hexcols[2],
+    hexcols[4],
+    hexcols[7],
+    hexcols[9],
+)
+numTimesteps = 0
+legendTitle = " "
+
+inputFileNames = []
+
+# Work out how many data series there are
+if len(sys.argv) == 1:
+    print("Please specify an input file in the arguments.")
+    sys.exit()
+else:
+    for fileName in sys.argv[1:]:
+        inputFileNames.append(fileName)
+    numOfSeries = int(len(sys.argv) - 1)
+
+# Get the names of the branch, Git revision, hydro scheme and hydro kernel
+def parse_header(inputFile):
+    with open(inputFile, "r") as f:
+        found_end = False
+        for line in f:
+            if "Branch:" in line:
+                s = line.split()
+                line = s[2:]
+                branch.append(" ".join(line))
+            elif "Revision:" in line:
+                s = line.split()
+                revision.append(s[2])
+            elif "Hydrodynamic scheme:" in line:
+                line = line[2:-1]
+                s = line.split()
+                line = s[2:]
+                hydro_scheme.append(" ".join(line))
+            elif "Hydrodynamic kernel:" in line:
+                line = line[2:-1]
+                s = line.split()
+                line = s[2:5]
+                hydro_kernel.append(" ".join(line))
+            elif "neighbours:" in line:
+                s = line.split()
+                hydro_neighbours.append(s[4])
+            elif "Eta:" in line:
+                s = line.split()
+                hydro_eta.append(s[2])
+    return
+
+
+# Parse file and return total time taken, speed up and parallel efficiency
+def parse_files():
+
+    totalTime = []
+    sumTotal = []
+    speedUp = []
+    parallelEff = []
+
+    for i in range(0, numOfSeries):  # Loop over each data series
+
+        # Get path to set of files
+        path, name = ntpath.split(inputFileNames[i])
+
+        # Get each file that starts with the cmd line arg
+        file_list = glob.glob(inputFileNames[i] + "*")
+
+        threadList.append([])
+
+        # Remove path from file names
+        for j in range(0, len(file_list)):
+            p, filename = ntpath.split(file_list[j])
+            file_list[j] = filename
+
+        # Create a list of threads using the list of files
+        for fileName in file_list:
+            s = re.split(r"[_.]+", fileName)
+            threadList[i].append(int(s[1]))
+
+        # Re-add path once each file has been found
+        if len(path) != 0:
+            for j in range(0, len(file_list)):
+                file_list[j] = path + "/" + file_list[j]
+
+        # Sort the thread list in ascending order and save the indices
+        sorted_indices = np.argsort(threadList[i])
+        threadList[i].sort()
+
+        # Sort the file list in ascending order according to the thread number
+        file_list = [file_list[j] for j in sorted_indices]
+
+        parse_header(file_list[0])
+
+        branch[i] = branch[i].replace("_", "\\_")
+
+        # version.append("$\\textrm{%s}$"%str(branch[i]))# + " " + revision[i])# + "\n" + hydro_scheme[i] +
+        #                   "\n" + hydro_kernel[i] + r", $N_{ngb}=%d$"%float(hydro_neighbours[i]) +
+        #                   r", $\eta=%.3f$"%float(hydro_eta[i]))
+        totalTime.append([])
+        speedUp.append([])
+        parallelEff.append([])
+
+        # Loop over all files for a given series and load the times
+        for j in range(0, len(file_list)):
+            times = np.loadtxt(file_list[j], usecols=(9,))
+            updates = np.loadtxt(file_list[j], usecols=(6,))
+            totalTime[i].append(np.sum(times))
+
+        sumTotal.append(np.sum(totalTime[i]))
+
+    # Sort the total times in descending order
+    sorted_indices = np.argsort(sumTotal)[::-1]
+
+    totalTime = [totalTime[j] for j in sorted_indices]
+    branchNew = [branch[j] for j in sorted_indices]
+
+    for i in range(0, numOfSeries):
+        version.append("$\\textrm{%s}$" % str(branchNew[i]))
+
+    global numTimesteps
+    numTimesteps = len(times)
+
+    # Find speed-up and parallel efficiency
+    for i in range(0, numOfSeries):
+        for j in range(0, len(file_list)):
+            speedUp[i].append(totalTime[i][0] / totalTime[i][j])
+            parallelEff[i].append(speedUp[i][j] / threadList[i][j])
+
+    return (totalTime, speedUp, parallelEff)
+
+
+def print_results(totalTime, parallelEff, version):
+
+    for i in range(0, numOfSeries):
+        print(" ")
+        print("------------------------------------")
+        print(version[i])
+        print("------------------------------------")
+        print("Wall clock time for: {} time steps".format(numTimesteps))
+        print("------------------------------------")
+
+        for j in range(0, len(threadList[i])):
+            print(str(threadList[i][j]) + " threads: {}".format(totalTime[i][j]))
+
+        print(" ")
+        print("------------------------------------")
+        print("Parallel Efficiency for: {} time steps".format(numTimesteps))
+        print("------------------------------------")
+
+        for j in range(0, len(threadList[i])):
+            print(str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j]))
+
+    return
+
+
+# Returns a lighter/darker version of the colour
+def color_variant(hex_color, brightness_offset=1):
+
+    rgb_hex = [hex_color[x : x + 2] for x in [1, 3, 5]]
+    new_rgb_int = [int(hex_value, 16) + brightness_offset for hex_value in rgb_hex]
+    new_rgb_int = [
+        min([255, max([0, i])]) for i in new_rgb_int
+    ]  # make sure new values are between 0 and 255
+    # hex() produces "0x88", we want just "88"
+
+    return "#" + "".join([hex(i)[2:] for i in new_rgb_int])
+
+
+def plot_results(totalTime, speedUp, parallelEff, numSeries):
+
+    fig, axarr = plt.subplots(2, 2, figsize=(10, 10), frameon=True)
+    speedUpPlot = axarr[0, 0]
+    parallelEffPlot = axarr[0, 1]
+    totalTimePlot = axarr[1, 0]
+    emptyPlot = axarr[1, 1]
+
+    # Plot speed up
+    speedUpPlot.plot(threadList[0], threadList[0], linestyle="--", lw=1.5, color="0.2")
+    for i in range(0, numSeries):
+        speedUpPlot.plot(threadList[0], speedUp[i], linestyle[i], label=version[i])
+
+    speedUpPlot.set_ylabel("${\\rm Speed\\textendash up}$", labelpad=0.0)
+    speedUpPlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    speedUpPlot.set_xlim([0.7, threadList[0][-1] + 1])
+    speedUpPlot.set_ylim([0.7, threadList[0][-1] + 1])
+
+    # Plot parallel efficiency
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [1, 1],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.9, 0.9],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.75, 0.75],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.5, 0.5],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    for i in range(0, numSeries):
+        parallelEffPlot.plot(threadList[0], parallelEff[i], linestyle[i])
+
+    parallelEffPlot.set_xscale("log")
+    parallelEffPlot.set_ylabel("${\\rm Parallel~efficiency}$", labelpad=0.0)
+    parallelEffPlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    parallelEffPlot.set_ylim([0, 1.1])
+    parallelEffPlot.set_xlim([0.9, 10 ** (np.floor(np.log10(threadList[0][-1])) + 0.5)])
+
+    # Plot time to solution
+    for i in range(0, numOfSeries):
+        pts = [1, 10 ** np.floor(np.log10(threadList[i][-1]) + 1)]
+        totalTimePlot.loglog(pts, totalTime[i][0] / pts, "k--", lw=1.0, color="0.2")
+        totalTimePlot.loglog(
+            threadList[i], totalTime[i], linestyle[i], label=version[i]
+        )
+
+    y_min = 10 ** np.floor(np.log10(np.min(totalTime[:][0]) * 0.6))
+    y_max = 1.0 * 10 ** np.floor(np.log10(np.max(totalTime[:][0]) * 1.5) + 1)
+    totalTimePlot.set_xscale("log")
+    totalTimePlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    totalTimePlot.set_ylabel("${\\rm Time~to~solution}~[{\\rm ms}]$", labelpad=0.0)
+    totalTimePlot.set_xlim([0.9, 10 ** (np.floor(np.log10(threadList[0][-1])) + 0.5)])
+    totalTimePlot.set_ylim(y_min, y_max)
+
+    totalTimePlot.legend(
+        bbox_to_anchor=(1.21, 0.97),
+        loc=2,
+        borderaxespad=0.0,
+        prop={"size": 12},
+        frameon=False,
+        title=legendTitle,
+    )
+    emptyPlot.axis("off")
+
+    for i, txt in enumerate(threadList[0]):
+        if (
+            2 ** np.floor(np.log2(threadList[0][i])) == threadList[0][i]
+        ):  # only powers of 2
+            speedUpPlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], speedUp[0][i]),
+                (threadList[0][i], speedUp[0][i] + 0.3),
+                color=hexcols[0],
+            )
+            parallelEffPlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], parallelEff[0][i]),
+                (threadList[0][i], parallelEff[0][i] + 0.02),
+                color=hexcols[0],
+            )
+            totalTimePlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], totalTime[0][i]),
+                (threadList[0][i], totalTime[0][i] * 1.1),
+                color=hexcols[0],
+            )
+
+    # fig.suptitle("Thread Speed Up, Parallel Efficiency and Time To Solution for {} Time Steps of Cosmo Volume\n Cmd Line: {}, Platform: {}".format(numTimesteps),cmdLine,platform))
+    fig.suptitle(
+        "${\\rm Speed\\textendash up,~parallel~efficiency~and~time~to~solution~for}~%d~{\\rm time\\textendash steps}$"
+        % numTimesteps,
+        fontsize=16,
+    )
+
+    return
+
+
+# Calculate results
+(totalTime, speedUp, parallelEff) = parse_files()
+
+legendTitle = version[0]
+
+plot_results(totalTime, speedUp, parallelEff, numOfSeries)
+
+print_results(totalTime, parallelEff, version)
+
+# And plot
+plt.show()
diff --git a/tools/plot_scaling_results_breakdown.py b/tools/plot_scaling_results_breakdown.py
new file mode 100755
index 0000000000000000000000000000000000000000..570ec37ee908dbbe51bfc12fa2c3af59d2d8800a
--- /dev/null
+++ b/tools/plot_scaling_results_breakdown.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python
+#
+# Usage:
+#  python plot_scaling_results_breakdown.py input-file1-ext input-file2-ext ...
+#
+# Description:
+# Plots speed up, parallel efficiency and time to solution given a "timesteps" output file generated by SWIFT.
+#
+# Example:
+# python plot_scaling_results_breakdown.py _threads_cosma_stdout.txt _threads_knl_stdout.txt
+#
+# The working directory should contain files 1_threads_cosma_stdout.txt - 64_threads_cosma_stdout.txt and 1_threads_knl_stdout.txt - 64_threads_knl_stdout.txt, i.e. wall clock time for each run using a given number of threads
+
+import sys
+import glob
+import re
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.stats
+import ntpath
+
+params = {
+    "axes.labelsize": 14,
+    "axes.titlesize": 18,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 14,
+    "ytick.labelsize": 14,
+    "text.usetex": True,
+    "figure.subplot.left": 0.055,
+    "figure.subplot.right": 0.98,
+    "figure.subplot.bottom": 0.05,
+    "figure.subplot.top": 0.95,
+    "figure.subplot.wspace": 0.14,
+    "figure.subplot.hspace": 0.12,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+    "text.latex.unicode": True,
+}
+plt.rcParams.update(params)
+plt.rc("font", **{"family": "sans-serif", "sans-serif": ["Times"]})
+
+version = []
+branch = []
+revision = []
+hydro_scheme = []
+hydro_kernel = []
+hydro_neighbours = []
+hydro_eta = []
+threadList = []
+hexcols = [
+    "#332288",
+    "#88CCEE",
+    "#44AA99",
+    "#117733",
+    "#999933",
+    "#DDCC77",
+    "#CC6677",
+    "#882255",
+    "#AA4499",
+    "#661100",
+    "#6699CC",
+    "#AA4466",
+    "#4477AA",
+]
+linestyle = (
+    hexcols[0],
+    hexcols[1],
+    hexcols[3],
+    hexcols[5],
+    hexcols[6],
+    hexcols[8],
+    hexcols[2],
+    hexcols[4],
+    hexcols[7],
+    hexcols[9],
+)
+numTimesteps = 0
+legendTitle = " "
+
+inputFileNames = []
+
+# Work out how many data series there are
+if len(sys.argv) == 1:
+    print("Please specify an input file in the arguments.")
+    sys.exit()
+else:
+    for fileName in sys.argv[1:]:
+        inputFileNames.append(fileName)
+    numOfSeries = int(len(sys.argv) - 1)
+
+# Get the names of the branch, Git revision, hydro scheme and hydro kernel
+def parse_header(inputFile):
+    with open(inputFile, "r") as f:
+        found_end = False
+        for line in f:
+            if "Branch:" in line:
+                s = line.split()
+                line = s[2:]
+                branch.append(" ".join(line))
+            elif "Revision:" in line:
+                s = line.split()
+                revision.append(s[2])
+            elif "Hydrodynamic scheme:" in line:
+                line = line[2:-1]
+                s = line.split()
+                line = s[2:]
+                hydro_scheme.append(" ".join(line))
+            elif "Hydrodynamic kernel:" in line:
+                line = line[2:-1]
+                s = line.split()
+                line = s[2:5]
+                hydro_kernel.append(" ".join(line))
+            elif "neighbours:" in line:
+                s = line.split()
+                hydro_neighbours.append(s[4])
+            elif "Eta:" in line:
+                s = line.split()
+                hydro_eta.append(s[2])
+    return
+
+
+# Parse file and return total time taken, speed up and parallel efficiency
+def parse_files():
+
+    totalTime = []
+    sumTotal = []
+    speedUp = []
+    parallelEff = []
+
+    for i in range(0, numOfSeries):  # Loop over each data series
+
+        # Get path to set of files
+        path, name = ntpath.split(inputFileNames[i])
+
+        # Get each file that starts with the cmd line arg
+        file_list = glob.glob(inputFileNames[i] + "*")
+
+        threadList.append([])
+
+        # Remove path from file names
+        for j in range(0, len(file_list)):
+            p, filename = ntpath.split(file_list[j])
+            file_list[j] = filename
+
+        # Create a list of threads using the list of files
+        for fileName in file_list:
+            s = re.split(r"[_.]+", fileName)
+            threadList[i].append(int(s[1]))
+
+        # Re-add path once each file has been found
+        if len(path) != 0:
+            for j in range(0, len(file_list)):
+                file_list[j] = path + "/" + file_list[j]
+
+        # Sort the thread list in ascending order and save the indices
+        sorted_indices = np.argsort(threadList[i])
+        threadList[i].sort()
+
+        # Sort the file list in ascending order according to the thread number
+        file_list = [file_list[j] for j in sorted_indices]
+
+        parse_header(file_list[0])
+
+        branch[i] = branch[i].replace("_", "\\_")
+
+        # version.append("$\\textrm{%s}$"%str(branch[i]))# + " " + revision[i])# + "\n" + hydro_scheme[i] +
+        #                   "\n" + hydro_kernel[i] + r", $N_{ngb}=%d$"%float(hydro_neighbours[i]) +
+        #                   r", $\eta=%.3f$"%float(hydro_eta[i]))
+        totalTime.append([])
+        speedUp.append([])
+        parallelEff.append([])
+
+        # Loop over all files for a given series and load the times
+        for j in range(0, len(file_list)):
+            times = np.loadtxt(file_list[j], usecols=(9,))
+            updates = np.loadtxt(file_list[j], usecols=(6,))
+            totalTime[i].append(np.sum(times))
+
+        sumTotal.append(np.sum(totalTime[i]))
+
+    # Sort the total times in descending order
+    sorted_indices = np.argsort(sumTotal)[::-1]
+
+    totalTime = [totalTime[j] for j in sorted_indices]
+    branchNew = [branch[j] for j in sorted_indices]
+
+    for i in range(0, numOfSeries):
+        version.append("$\\textrm{%s}$" % str(branchNew[i]))
+
+    global numTimesteps
+    numTimesteps = len(times)
+
+    # Find speed-up and parallel efficiency
+    for i in range(0, numOfSeries):
+        for j in range(0, len(file_list)):
+            speedUp[i].append(totalTime[i][0] / totalTime[i][j])
+            parallelEff[i].append(speedUp[i][j] / threadList[i][j])
+
+    return (totalTime, speedUp, parallelEff)
+
+
+def print_results(totalTime, parallelEff, version):
+
+    for i in range(0, numOfSeries):
+        print(" ")
+        print("------------------------------------")
+        print(version[i])
+        print("------------------------------------")
+        print("Wall clock time for: {} time steps".format(numTimesteps))
+        print("------------------------------------")
+
+        for j in range(0, len(threadList[i])):
+            print(str(threadList[i][j]) + " threads: {}".format(totalTime[i][j]))
+
+        print(" ")
+        print("------------------------------------")
+        print("Parallel Efficiency for: {} time steps".format(numTimesteps))
+        print("------------------------------------")
+
+        for j in range(0, len(threadList[i])):
+            print(str(threadList[i][j]) + " threads: {}".format(parallelEff[i][j]))
+
+    return
+
+
+# Returns a lighter/darker version of the colour
+def color_variant(hex_color, brightness_offset=1):
+
+    rgb_hex = [hex_color[x : x + 2] for x in [1, 3, 5]]
+    new_rgb_int = [int(hex_value, 16) + brightness_offset for hex_value in rgb_hex]
+    new_rgb_int = [
+        min([255, max([0, i])]) for i in new_rgb_int
+    ]  # make sure new values are between 0 and 255
+    # hex() produces "0x88", we want just "88"
+
+    return "#" + "".join([hex(i)[2:] for i in new_rgb_int])
+
+
+def plot_results(totalTime, speedUp, parallelEff, numSeries):
+
+    fig, axarr = plt.subplots(2, 2, figsize=(10, 10), frameon=True)
+    speedUpPlot = axarr[0, 0]
+    parallelEffPlot = axarr[0, 1]
+    totalTimePlot = axarr[1, 0]
+    emptyPlot = axarr[1, 1]
+
+    # Plot speed up
+    speedUpPlot.plot(threadList[0], threadList[0], linestyle="--", lw=1.5, color="0.2")
+    for i in range(0, numSeries):
+        speedUpPlot.plot(threadList[0], speedUp[i], linestyle[i], label=version[i])
+
+    speedUpPlot.set_ylabel("${\\rm Speed\\textendash up}$", labelpad=0.0)
+    speedUpPlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    speedUpPlot.set_xlim([0.7, threadList[0][-1] + 1])
+    speedUpPlot.set_ylim([0.7, threadList[0][-1] + 1])
+
+    # Plot parallel efficiency
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [1, 1],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.9, 0.9],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.75, 0.75],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    parallelEffPlot.plot(
+        [threadList[0][0], 10 ** np.floor(np.log10(threadList[0][-1]) + 1)],
+        [0.5, 0.5],
+        "k--",
+        lw=1.5,
+        color="0.2",
+    )
+    for i in range(0, numSeries):
+        parallelEffPlot.plot(threadList[0], parallelEff[i], linestyle[i])
+
+    parallelEffPlot.set_xscale("log")
+    parallelEffPlot.set_ylabel("${\\rm Parallel~efficiency}$", labelpad=0.0)
+    parallelEffPlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    parallelEffPlot.set_ylim([0, 1.1])
+    parallelEffPlot.set_xlim([0.9, 10 ** (np.floor(np.log10(threadList[0][-1])) + 0.5)])
+
+    # Plot time to solution
+    for i in range(0, numSeries):
+        for j in range(0, len(threadList[0])):
+            totalTime[i][j] = totalTime[i][j] * threadList[i][j]
+            if i > 1:
+                totalTime[i][j] = totalTime[i][j] + totalTime[i - 1][j]
+        totalTimePlot.plot(threadList[0], totalTime[i], linestyle[i], label=version[i])
+
+        if i > 1:
+            colour = color_variant(linestyle[i], 100)
+            totalTimePlot.fill_between(
+                threadList[0],
+                np.array(totalTime[i]),
+                np.array(totalTime[i - 1]),
+                facecolor=colour,
+            )
+        elif i == 1:
+            colour = color_variant(linestyle[i], 100)
+            totalTimePlot.fill_between(threadList[0], totalTime[i], facecolor=colour)
+
+    totalTimePlot.set_xscale("log")
+    totalTimePlot.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
+    totalTimePlot.set_xlabel("${\\rm Threads}$", labelpad=0.0)
+    totalTimePlot.set_ylabel(
+        "${\\rm Time~to~solution~x~No.~of~cores}~[{\\rm ms}]$", labelpad=0.0
+    )
+    totalTimePlot.set_xlim([0.9, 10 ** (np.floor(np.log10(threadList[0][-1])) + 0.5)])
+    # totalTimePlot.set_ylim(y_min, y_max)
+
+    totalTimePlot.legend(
+        bbox_to_anchor=(1.21, 0.97),
+        loc=2,
+        borderaxespad=0.0,
+        prop={"size": 12},
+        frameon=False,
+        title=legendTitle,
+    )
+    emptyPlot.axis("off")
+
+    for i, txt in enumerate(threadList[0]):
+        if (
+            2 ** np.floor(np.log2(threadList[0][i])) == threadList[0][i]
+        ):  # only powers of 2
+            speedUpPlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], speedUp[0][i]),
+                (threadList[0][i], speedUp[0][i] + 0.3),
+                color=hexcols[0],
+            )
+            parallelEffPlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], parallelEff[0][i]),
+                (threadList[0][i], parallelEff[0][i] + 0.02),
+                color=hexcols[0],
+            )
+            totalTimePlot.annotate(
+                "$%s$" % txt,
+                (threadList[0][i], totalTime[0][i]),
+                (threadList[0][i], totalTime[0][i] * 1.1),
+                color=hexcols[0],
+            )
+
+    # fig.suptitle("Thread Speed Up, Parallel Efficiency and Time To Solution for {} Time Steps of Cosmo Volume\n Cmd Line: {}, Platform: {}".format(numTimesteps),cmdLine,platform))
+    fig.suptitle(
+        "${\\rm Speed\\textendash up,~parallel~efficiency~and~time~to~solution~x~no.~of~cores~for}~%d~{\\rm time\\textendash steps}$"
+        % numTimesteps,
+        fontsize=16,
+    )
+
+    return
+
+
+# Calculate results
+(totalTime, speedUp, parallelEff) = parse_files()
+
+legendTitle = version[0]
+
+plot_results(totalTime, speedUp, parallelEff, numOfSeries)
+
+print_results(totalTime, parallelEff, version)
+
+# And plot
+plt.show()
diff --git a/tools/plot_task_dependencies.py b/tools/plot_task_dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..14ba7c99f621c0c62c3c9cb8e8d3b0c78e491401
--- /dev/null
+++ b/tools/plot_task_dependencies.py
@@ -0,0 +1,522 @@
+#!/usr/bin/env python3
+"""
+This file generates a graphviz file that represents the SWIFT tasks
+ dependencies.
+
+Example: ./plot_task_dependencies.py dependency_graph_*.csv
+"""
+from pandas import read_csv
+import numpy as np
+from subprocess import call
+from optparse import OptionParser
+
+
def parseOption():
    """
    Parse the command line options.

    Returns
    -------

    opt: optparse.Values
        The parsed options (``with_calls`` flag)

    files: list
        The input filenames (at least one)
    """
    parser = OptionParser()

    parser.add_option(
        "-c", "--with-calls", dest="with_calls",
        help="Add the function calls in the graph",
        action="store_true")

    opt, files = parser.parse_args()

    # The dependency data may be spread over several files (the module
    # docstring suggests a glob, and appendData merges any number of
    # frames), so accept one or more inputs instead of exactly one.
    if len(files) < 1:
        raise Exception("You need to provide at least one file")

    return opt, files
+
+
def getGitVersion(f, git):
    """
    Extract the git version stored in the file's leading comment and check
    it against the version seen so far.

    Parameters
    ----------

    f: str
        Filename

    git: str
        Git version of previous file (or None for the first file)

    Returns
    -------

    new_git: str
        Git version of current file (None when the file has no comment)
    """
    # the version is written as a '# <version>' comment on the first line
    with open(f, "r") as infile:
        first_line = infile.readline()

    # not a comment -> no version information available
    if first_line[0] != "#":
        return None

    # drop the '# ' prefix and any trailing newline/whitespace
    new_git = first_line[2:].rstrip()

    # all files must come from the same code version
    if git is not None and git != new_git:
        raise Exception("Files were not produced by the same version")

    return new_git
+
+
def appendSingleData(data0, datai):
    """
    Merge the dependencies of a second DataFrame into the first one.

    A dependency already present in ``data0`` gets its ``number_link``
    count increased; a new dependency is appended as a new row.

    Parameters
    ----------

    data0: DataFrame
        One of the dataframe

    datai: DataFrame
        The second dataframe

    Returns
    -------

    data0: DataFrame
        The updated dataframe
    """
    # local import: the module only pulls read_csv from pandas at top level
    from pandas import concat

    # loop over all rows in datai
    for i, row in datai.iterrows():
        # get data
        ta = datai["task_in"][i]
        tb = datai["task_out"][i]
        ind = np.logical_and(data0["task_in"] == ta,
                             data0["task_out"] == tb)

        # check number of ta->tb
        N = np.sum(ind)
        if N > 1:
            raise Exception("Same dependency written multiple times %s->%s" %
                            (ta, tb))
        if N == 0:
            # BUG FIX: DataFrame.append returns a new frame; the original
            # code discarded that result, silently dropping every new
            # dependency. Rebind data0 instead (concat also survives
            # pandas >= 2.0 where .append was removed).
            data0 = concat([data0, row.to_frame().T], ignore_index=True)
        else:
            # otherwise just update the number of link
            ind = ind[ind].index[0]
            tmp = data0["number_link"][ind] + datai["number_link"][i]
            data0.at[ind, "number_link"] = tmp

    return data0
+
+
def appendData(data):
    """
    Merge a list of dependency DataFrames into a single one.

    Parameters
    ----------

    data: list
        List containing all the dataframe to append together

    Returns
    -------

    data: DataFrame
        The complete dataframe
    """
    # nothing to merge
    if len(data) == 1:
        return data[0]

    # fold every extra frame into the first one
    for extra in data[1:]:
        data[0] = appendSingleData(data[0], extra)

    return data[0]
+
+
def taskIsStars(name):
    """
    Does the task concern stars?

    Parameters
    ----------

    name: str
        Task name
    """
    # star tasks mention either the loop name or the particle type
    return ("stars" in name) or ("spart" in name)
+
+
def taskIsHydro(name):
    """
    Does the task concern the hydro?

    Parameters
    ----------

    name: str
        Task name
    """
    # density loops count as hydro unless they belong to the stars loop
    if "density" in name and "stars" not in name:
        return True

    # any of these substrings marks a hydro-related task
    for marker in ("_part", "rho", "gradient", "force", "xv"):
        if marker in name:
            return True

    # explicit list of hydro task names
    hydro_tasks = [
        "sort",
        "ghost_in",
        "ghost",
        "ghost_out",
        "extra_ghost",
        "cooling",
        "star_formation",
    ]
    return name in hydro_tasks
+
+
def taskIsGravity(name):
    """
    Does the task concern the gravity?

    Parameters
    ----------

    name: str
        Task name
    """
    # gravity tasks mention either the particle type or the solver name
    return ("gpart" in name) or ("grav" in name)
+
+
def getFunctionCalls(name):
    """
    Build a graphviz HTML-like label listing the main functions called
    by a task.

    Parameters
    ----------

    name: str
        Task name

    Returns
    -------

    label: str or None
        HTML-like graphviz label ("<...>"), or None when no call
        information is known for this task.
    """
    txt = None
    if name == "ghost":
        txt = """hydro_end_density, chemistry_end_density,<br/>
        hydro_prepare_gradient, hydro_reset_gradient,<br/>
        hydro_prepare_force, hydro_reset_acceleration,<br/>
        hydro_init_part, chemistry_init_part,<br/>
        hydro_has_no_neighbours, chemistry_part_has_no_neighbours
        """

    elif name == "cooling":
        txt = "cooling_cool_part"

    elif name == "timestep":
        txt = "tracers_after_timestep"

    elif name == "drift_part":
        txt = """drift_part, tracers_after_drift,<br/>
        hydro_init_part, chemistry_init_part,<br/>
        tracers_after_init
        """

    elif name == "kick1":
        txt = "kick_part, kick_gpart, kick_spart"

    elif name == "kick2":
        # typo fix: the function is gravity_reset_predicted_values
        txt = """kick_part, kick_gpart, kick_spart,<br/>
        hydro_reset_predicted_values,
        gravity_reset_predicted_values,<br/>
        stars_reset_predicted_values,
        """

    elif name == "end_force":
        txt = """hydro_end_force, gravity_end_force,<br/>
        stars_end_force"""

    elif name == "drift_gpart":
        txt = """drift_gpart, gravity_init_gpart,<br/>
        drift_spart
        """

    if "density" in name and "stars" not in name:
        txt = """runner_iact_nonsym_chemistry, runner_iact_chemistry,<br/>
        runner_iact_nonsym_density, runner_iact_density"""

    if "force" in name and "end" not in name:
        # BUG FIX: the force loop was listing the density interaction
        # functions (copy-paste from the branch above)
        txt = "runner_iact_nonsym_force, runner_iact_force"

    if txt is None:
        return None
    else:
        # wrap in an HTML-like graphviz label
        pre = "<" + name + "<BR/> <Font POINT-SIZE='10'>Calls: "
        app = "</Font>>"
        return pre + txt + app
+
+
def writeTask(f, name, implicit, mpi, with_calls):
    """
    Write a single task node declaration (style, shape, color, label).

    Parameters
    ----------

    f: File
        File where to write the data

    name: str
        Task name

    implicit: int
        Is the task implicit

    mpi: int
        Is the task MPI related

    with_calls: bool
        if true, write down the function calls
    """
    # collect the graphviz attributes for this node
    attributes = []

    if implicit:
        attributes.append("style=filled,fillcolor=lightgrey")
    if mpi:
        attributes.append("shape=diamond")

    # color by physics module
    if taskIsStars(name):
        attributes.append("color=darkorange1")
    if taskIsHydro(name):
        attributes.append("color=blue3")
    if taskIsGravity(name):
        attributes.append("color=red3")

    # optionally attach the list of function calls as the node label
    if with_calls:
        func = getFunctionCalls(name)
        if func is not None:
            attributes.append("label=" + func)

    # write it
    f.write("\t " + name + "[" + ",".join(attributes) + "];\n")
+
+
def writeHeader(f, data, git, opt):
    """
    Write the graph preamble and declare every task node once.

    Parameters
    ----------

    f: File
        File where to write the data

    data: DataFrame
        The dataframe to write

    git: str
        The git version

    opt: object
        The options provided to this script
    """
    # preamble
    f.write("digraph task_dep {\n")
    f.write("\t # Header\n")
    f.write('\t label="Task dependencies for SWIFT %s";\n' % git)
    f.write("\t compound=true;\n")
    f.write("\t ratio=0.66;\n")
    f.write("\t node[nodesep=0.15];\n")
    f.write("\n")

    # declare each task node exactly once: first every "in" task,
    # then every "out" task, preserving first-seen order
    f.write("\t # Special tasks\n")
    written = []
    for task_col, implicit_col, mpi_col in (
        ("task_in", "implicit_in", "mpi_in"),
        ("task_out", "implicit_out", "mpi_out"),
    ):
        for i in range(len(data)):
            task = data[task_col][i]
            if task in written:
                continue

            written.append(task)
            writeTask(f, task, data[implicit_col][i], data[mpi_col][i],
                      opt.with_calls)

    f.write("\n")
+
+
def writeCluster(f, tasks, cluster):
    """
    Write one cluster subgraph containing the given tasks.

    Parameters
    ----------

    f: File
        File where to write the data

    tasks: list
        List of all tasks in the cluster

    cluster: str
        Cluster name
    """
    # graphviz groups nodes when the subgraph name starts with "cluster"
    header = "\t subgraph cluster%s {\n" % cluster
    members = "".join("\t\t %s;\n" % task for task in tasks)
    f.write(header + '\t\t label="";\n' + members + "\t };\n\n")
+
+
def writeClusters(f, data):
    """
    Write all the cluster subgraphs.

    Parameters
    ----------

    f: File
        File where to write the data

    data: DataFrame
        The dataframe to write
    """
    f.write("\t # Clusters\n")

    # unique cluster names appearing on either end of a dependency
    cluster_names = np.unique(data[["cluster_in", "cluster_out"]])

    for name in cluster_names:
        # "None" marks tasks that belong to no cluster
        if name == "None":
            continue

        # gather every task whose in- or out-side lives in this cluster
        members = np.append(
            data["task_in"][data["cluster_in"] == name],
            data["task_out"][data["cluster_out"] == name],
        )

        # write current cluster with duplicates removed
        writeCluster(f, np.unique(members), name)

    f.write("\n")
+
+
def writeDependencies(f, data):
    """
    Write all the dependency edges between tasks.

    Parameters
    ----------

    f: File
        File where to write the data

    data: DataFrame
        The dataframe to write

    """
    f.write("\t # Dependencies\n")

    seen = []
    max_rank = data["number_rank"].max()

    for i in range(len(data)):
        # get data
        ta = data["task_in"][i]
        tb = data["task_out"][i]

        # guard against duplicated dependencies in the merged data
        key = "%s_%s" % (ta, tb)
        if key in seen:
            raise Exception("Found two same task dependencies")
        seen.append(key)

        # dependencies missing from some ranks are drawn dashed
        style = "" if data["number_rank"][i] == max_rank else ",style=dashed"
        f.write("\t %s->%s[label=%i%s]\n" %
                (ta, tb, data["number_link"][i], style))
+
+
def writeFooter(f):
    """
    Close the top-level digraph opened by writeHeader.

    Parameters
    ----------

    f: File
        File where to write the data
    """
    f.write("}")
+
+
if __name__ == "__main__":

    opt, files = parseOption()

    # output filenames
    dot_output = "dependency_graph.dot"
    png_output = "dependency_graph.png"

    # read the input files, checking they share the same git version
    data = []
    git = None
    for filename in files:
        tmp = read_csv(filename, delimiter=",", comment="#")
        git = getGitVersion(filename, git)
        data.append(tmp)

    # merge everything into a single dependency table
    data = appendData(data)

    # write the graphviz source
    with open(dot_output, "w") as f:
        writeHeader(f, data, git, opt)

        writeClusters(f, data)

        writeDependencies(f, data)

        writeFooter(f)

    # render the graph (requires the graphviz 'dot' binary)
    call(["dot", "-Tpng", dot_output, "-o", png_output])

    print("You will find the graph in %s" % png_output)

    if opt.with_calls:
        # typo fix: "recommand" -> "recommend"
        print("We recommend to use the python package xdot available on pypi:")
        print("  python -m xdot %s" % dot_output)
diff --git a/tools/plot_task_level.py b/tools/plot_task_level.py
new file mode 100755
index 0000000000000000000000000000000000000000..23e3ec878a2b8ef0f4d3c56d91ef75026e012de8
--- /dev/null
+++ b/tools/plot_task_level.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+"""
+Usage:
+  ./plot_task_level.py task_level.txt
+
+Description:
+  Plot the number of tasks for each depth level and each type of task.
+"""
+
+
+import pandas as pd
+import matplotlib.pyplot as plt
+import sys
+
# get filename (last command line argument)
filename = sys.argv[-1]

# Column names
names = ["type", "subtype", "depth", "count"]

# read file
data = pd.read_csv(filename, sep=" ", comment="#", names=names)

# generate color map; pad the divisor so neighbouring depths get
# distinguishable hues
cmap = plt.get_cmap("hsv")
N = data["depth"].max() + 5

# plot data
# BUG FIX: range() excludes its upper bound, so the deepest level was
# never plotted; use max() + 1 to include it.
for i in range(data["depth"].max() + 1):
    ind = data["depth"] == i
    label = "depth = %i" % i
    c = cmap(i / N)
    plt.plot(
        data["type"][ind] + "_" + data["subtype"][ind],
        data["count"][ind],
        ".",
        label=label,
        color=c,
    )

# modify figure parameters and show it
plt.gca().set_yscale("log")
plt.xticks(rotation=45)
plt.ylabel("Number of Tasks")
plt.gcf().subplots_adjust(bottom=0.15)
plt.legend()
plt.show()
diff --git a/examples/process_cells b/tools/process_cells
similarity index 100%
rename from examples/process_cells
rename to tools/process_cells
diff --git a/examples/process_cells_helper b/tools/process_cells_helper
similarity index 100%
rename from examples/process_cells_helper
rename to tools/process_cells_helper
diff --git a/tools/task_plots/analyse_tasks.py b/tools/task_plots/analyse_tasks.py
new file mode 100755
index 0000000000000000000000000000000000000000..fc9df0e4797cfb16e883df551af30dc0d3244edc
--- /dev/null
+++ b/tools/task_plots/analyse_tasks.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+"""
+Usage:
+    analyse_tasks.py [options] input.dat
+
+where input.dat is a thread info file for a step (MPI or non-MPI). Use the
+'-y interval' flag of the swift and swift_mpi commands to create these
+(you will also need to configure with the --enable-task-debugging option).
+
+The output is an analysis of the task timings, including deadtime per thread
+and step, total amount of time spent for each task type, for the whole step
+and per thread and the minimum and maximum times spent per task type.
+
+This file is part of SWIFT.
+Copyright (c) 2017 Peter W. Draper (p.w.draper@durham.ac.uk)
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import matplotlib
+
+matplotlib.use("Agg")
+import matplotlib.collections as collections
+import matplotlib.ticker as plticker
+import pylab as pl
+import sys
+import argparse
+
#  Handle the command line.
parser = argparse.ArgumentParser(description="Analyse task dumps")

# positional: the '-y interval' thread info dump to analyse
parser.add_argument("input", help="Thread data file (-y output)")
parser.add_argument(
    "-v", "--verbose", dest="verbose", default=False, action="store_true",
    help="Verbose output (default: False)",
)
parser.add_argument(
    "-r", "--rank", dest="rank", default="all", action="store",
    help="Rank to process (default: all)",
)

args = parser.parse_args()
infile = args.input
+
#  Tasks and subtypes. Indexed as in tasks.h.
#  NOTE(review): these tables must match the task_type / task_subtype enum
#  order of the SWIFT binary that produced the dump; a mismatch silently
#  mislabels every task — TODO confirm against tasks.h.
TASKTYPES = [
    "none",
    "sort",
    "self",
    "pair",
    "sub_self",
    "sub_pair",
    "init_grav",
    "init_grav_out",
    "ghost_in",
    "ghost",
    "ghost_out",
    "extra_ghost",
    "drift_part",
    "drift_spart",
    "drift_gpart",
    "drift_gpart_out",
    "hydro_end_force",
    "kick1",
    "kick2",
    "timestep",
    "timestep_limiter",
    "send",
    "recv",
    "grav_long_range",
    "grav_mm",
    "grav_down_in",
    "grav_down",
    "grav_mesh",
    "grav_end_force",
    "cooling",
    "star_formation",
    "logger",
    "stars_in",
    "stars_out",
    "stars_ghost_in",
    "stars_ghost",
    "stars_ghost_out",
    "stars_sort",
    "count",
]

#  Task subtype names, indexed as in the task_subtype enum.
SUBTYPES = [
    "none",
    "density",
    "gradient",
    "force",
    "limiter",
    "grav",
    "external_grav",
    "tend",
    "xv",
    "rho",
    "gpart",
    "multipole",
    "spart",
    "stars_density",
    "stars_feedback",
    "count",
]

#  Labels for the 13 unique pair sort IDs (sid column of pair/sub_pair
#  tasks); indexed by the sid value read from the dump.
SIDS = [
    "(-1,-1,-1)",
    "(-1,-1, 0)",
    "(-1,-1, 1)",
    "(-1, 0,-1)",
    "(-1, 0, 0)",
    "(-1, 0, 1)",
    "(-1, 1,-1)",
    "(-1, 1, 0)",
    "(-1, 1, 1)",
    "( 0,-1,-1)",
    "( 0,-1, 0)",
    "( 0,-1, 1)",
    "( 0, 0,-1)",
]
+
#  Read input.
data = pl.loadtxt(infile)
full_step = data[0, :]

#  Do we have an MPI file? MPI dumps carry an extra leading rank column,
#  giving a 13-entry header row. (The original read full_step twice; the
#  duplicate assignment has been removed.)
if full_step.size == 13:
    print("# MPI mode")
    mpimode = True
    nranks = int(max(data[:, 0])) + 1
    print("# Number of ranks:", nranks)
    rankcol = 0
    threadscol = 1
    taskcol = 2
    subtaskcol = 3
    ticcol = 5
    toccol = 6
    updates = int(full_step[7])
    g_updates = int(full_step[8])
    s_updates = int(full_step[9])
else:
    print("# non MPI mode")
    nranks = 1
    mpimode = False
    rankcol = -1
    threadscol = 0
    taskcol = 1
    subtaskcol = 2
    ticcol = 4
    toccol = 5
    updates = int(full_step[6])
    g_updates = int(full_step[7])
    s_updates = int(full_step[8])

#  Get the CPU clock to convert ticks into milliseconds.
CPU_CLOCK = float(full_step[-1]) / 1000.0
if args.verbose:
    print("# CPU frequency:", CPU_CLOCK * 1000.0)
print("#   updates:", updates)
print("# g_updates:", g_updates)
print("# s_updates:", s_updates)

#  Select the ranks to analyse.
if mpimode:
    if args.rank == "all":
        ranks = list(range(nranks))
    else:
        ranks = [int(args.rank)]
        if ranks[0] >= nranks:
            print("Error: maximum rank is " + str(nranks - 1))
            sys.exit(1)
else:
    ranks = [1]

maxthread = int(max(data[:, threadscol])) + 1
print("# Maximum thread id:", maxthread)

#  Avoid start and end times of zero.
#  BUG FIX: the second filter previously re-read from the unfiltered
#  'data', which discarded the tic filter entirely; chain from 'sdata'.
sdata = data[data[:, ticcol] != 0]
sdata = sdata[sdata[:, toccol] != 0]
+
#  Now we process the required ranks.
#  For each rank: slice out its rows, normalise times, bucket tasks per
#  thread, then print task-time statistics, per-SID statistics and several
#  flavours of deadtime report.
for rank in ranks:
    if mpimode:
        print("# Rank", rank)
        data = sdata[sdata[:, rankcol] == rank]
        full_step = data[0, :]
    else:
        data = sdata

    #  Recover the start and end time
    tic_step = int(full_step[ticcol])
    toc_step = int(full_step[toccol])
    data = data[1:, :]

    #  Avoid start and end times of zero.
    data = data[data[:, ticcol] != 0]
    data = data[data[:, toccol] != 0]

    #  Calculate the time range.
    total_t = (toc_step - tic_step) / CPU_CLOCK
    print("# Data range: ", total_t, "ms")
    print()

    #  Correct times to relative values.
    start_t = float(tic_step)
    data[:, ticcol] -= start_t
    data[:, toccol] -= start_t
    # NOTE(review): end_t is computed but never used below.
    end_t = (toc_step - start_t) / CPU_CLOCK

    # per-thread lists of [tic, toc, tasktype, subtype, sid]; the -1 slot
    # is created but nothing below appends to it — presumably a spare for
    # thread id -1 entries (TODO confirm).
    tasks = {}
    tasks[-1] = []
    for i in range(maxthread):
        tasks[i] = []

    #  Gather into by thread data.
    num_lines = pl.shape(data)[0]
    for line in range(num_lines):
        thread = int(data[line, threadscol])
        tic = int(data[line, ticcol]) / CPU_CLOCK
        toc = int(data[line, toccol]) / CPU_CLOCK
        tasktype = int(data[line, taskcol])
        subtype = int(data[line, subtaskcol])
        # last column holds the pair sort ID; -1 means "not a pair task"
        sid = int(data[line, -1])

        tasks[thread].append([tic, toc, tasktype, subtype, sid])

    #  Sort by tic and gather used threads.
    #  NOTE(review): despite the comment, every id up to maxthread is
    #  appended, whether or not that thread ran any task.
    threadids = []
    for i in range(maxthread):
        tasks[i] = sorted(tasks[i], key=lambda task: task[0])
        threadids.append(i)

    #  Times per task.
    print("# Task times:")
    print("# -----------")
    print(
        "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}".format(
            "type/subtype", "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )

    # accumulate per-thread and whole-step statistics keyed by
    # "tasktype/subtype"; sidtimes collects pair-task times keyed by sid
    alltasktimes = {}
    sidtimes = {}
    for i in threadids:
        tasktimes = {}
        for task in tasks[i]:
            key = TASKTYPES[task[2]] + "/" + SUBTYPES[task[3]]
            dt = task[1] - task[0]
            if not key in tasktimes:
                tasktimes[key] = []
            tasktimes[key].append(dt)

            if not key in alltasktimes:
                alltasktimes[key] = []
            alltasktimes[key].append(dt)

            my_sid = task[4]
            if my_sid > -1:
                if not my_sid in sidtimes:
                    sidtimes[my_sid] = []
                sidtimes[my_sid].append(dt)

        print("# Thread : ", i)
        for key in sorted(tasktimes.keys()):
            taskmin = min(tasktimes[key])
            taskmax = max(tasktimes[key])
            tasksum = sum(tasktimes[key])
            print(
                "{0:19s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
                    key,
                    len(tasktimes[key]),
                    taskmin,
                    taskmax,
                    tasksum,
                    tasksum / len(tasktimes[key]),
                    tasksum / total_t * 100.0,
                )
            )
        print()

    # same statistics aggregated over all threads; percent is relative to
    # the total wall-clock across all threads
    print("# All threads : ")
    for key in sorted(alltasktimes.keys()):
        taskmin = min(alltasktimes[key])
        taskmax = max(alltasktimes[key])
        tasksum = sum(alltasktimes[key])
        print(
            "{0:18s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
                key,
                len(alltasktimes[key]),
                taskmin,
                taskmax,
                tasksum,
                tasksum / len(alltasktimes[key]),
                tasksum / (len(threadids) * total_t) * 100.0,
            )
        )
    print()

    # For pairs, show stuff sorted by SID
    print("# By SID (all threads): ")
    print(
        "# {0:<17s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}".format(
            "Pair/Sub-pair SID", "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )

    # all 13 SIDs are reported, with zeros for SIDs that never occurred
    for sid in range(0, 13):
        if sid in sidtimes:
            sidmin = min(sidtimes[sid])
            sidmax = max(sidtimes[sid])
            sidsum = sum(sidtimes[sid])
            sidcount = len(sidtimes[sid])
            sidmean = sidsum / sidcount
        else:
            sidmin = 0.0
            sidmax = 0.0
            sidsum = 0.0
            sidcount = 0
            sidmean = 0.0
        print(
            "{0:3d} {1:15s}: {2:7d} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.4f} {7:9.2f}".format(
                sid,
                SIDS[sid],
                sidcount,
                sidmin,
                sidmax,
                sidsum,
                sidmean,
                sidsum / (len(threadids) * total_t) * 100.0,
            )
        )
    print()

    #  Dead times.
    print("# Times not in tasks (deadtimes)")
    print("# ------------------------------")
    print("# Time before first task:")
    print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
    # idle time from step start to each thread's first task tic
    predeadtimes = []
    for i in threadids:
        if len(tasks[i]) > 0:
            predeadtime = tasks[i][0][0]
            print(
                "thread {0:2d}: {1:9.4f} {2:9.4f}".format(
                    i, predeadtime, predeadtime / total_t * 100.0
                )
            )
            predeadtimes.append(predeadtime)
        else:
            predeadtimes.append(0.0)

    predeadmin = min(predeadtimes)
    predeadmax = max(predeadtimes)
    predeadsum = sum(predeadtimes)
    print(
        "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
            "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )
    print(
        "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
            len(predeadtimes),
            predeadmin,
            predeadmax,
            predeadsum,
            predeadsum / len(predeadtimes),
            predeadsum / (len(threadids) * total_t) * 100.0,
        )
    )
    print()

    # idle time from each thread's last task toc to step end
    print("# Time after last task:")
    print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
    postdeadtimes = []
    for i in threadids:
        if len(tasks[i]) > 0:
            postdeadtime = total_t - tasks[i][-1][1]
            print(
                "thread {0:2d}: {1:9.4f} {2:9.4f}".format(
                    i, postdeadtime, postdeadtime / total_t * 100.0
                )
            )
            postdeadtimes.append(postdeadtime)
        else:
            postdeadtimes.append(0.0)

    postdeadmin = min(postdeadtimes)
    postdeadmax = max(postdeadtimes)
    postdeadsum = sum(postdeadtimes)
    print(
        "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
            "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )
    print(
        "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
            len(postdeadtimes),
            postdeadmin,
            postdeadmax,
            postdeadsum,
            postdeadsum / len(postdeadtimes),
            postdeadsum / (len(threadids) * total_t) * 100.0,
        )
    )
    print()

    #  Time in engine, i.e. from first to last tasks.
    #  Gaps between consecutive tasks on a thread, excluding the pre/post
    #  step idle time reported above.
    print("# Time between tasks (engine deadtime):")
    print(
        "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
            "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )
    enginedeadtimes = []
    for i in threadids:
        deadtimes = []
        if len(tasks[i]) > 0:
            last = tasks[i][0][0]
        else:
            last = 0.0
        for task in tasks[i]:
            dt = task[0] - last
            deadtimes.append(dt)
            last = task[1]

        #  Drop first value, last value already gone.
        if len(deadtimes) > 1:
            deadtimes = deadtimes[1:]
        else:
            #  Only one or fewer tasks, so no deadtime by definition.
            deadtimes = [0.0]

        deadmin = min(deadtimes)
        deadmax = max(deadtimes)
        deadsum = sum(deadtimes)
        print(
            "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
                i,
                len(deadtimes),
                deadmin,
                deadmax,
                deadsum,
                deadsum / len(deadtimes),
                deadsum / total_t * 100.0,
            )
        )
        enginedeadtimes.extend(deadtimes)

    deadmin = min(enginedeadtimes)
    deadmax = max(enginedeadtimes)
    deadsum = sum(enginedeadtimes)
    print(
        "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
            len(enginedeadtimes),
            deadmin,
            deadmax,
            deadsum,
            deadsum / len(enginedeadtimes),
            deadsum / (len(threadids) * total_t) * 100.0,
        )
    )
    print()

    #  All times in step.
    #  Same gap analysis, but including the time before the first task and
    #  after the last one (i.e. measured over the full step window).
    print("# All deadtimes:")
    print(
        "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
            "count", "minimum", "maximum", "sum", "mean", "percent"
        )
    )
    alldeadtimes = []
    for i in threadids:
        deadtimes = []
        last = 0
        for task in tasks[i]:
            dt = task[0] - last
            deadtimes.append(dt)
            last = task[1]
        dt = total_t - last
        deadtimes.append(dt)

        deadmin = min(deadtimes)
        deadmax = max(deadtimes)
        deadsum = sum(deadtimes)
        print(
            "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
                i,
                len(deadtimes),
                deadmin,
                deadmax,
                deadsum,
                deadsum / len(deadtimes),
                deadsum / total_t * 100.0,
            )
        )
        alldeadtimes.extend(deadtimes)

    deadmin = min(alldeadtimes)
    deadmax = max(alldeadtimes)
    deadsum = sum(alldeadtimes)
    print(
        "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
            len(alldeadtimes),
            deadmin,
            deadmax,
            deadsum,
            deadsum / len(alldeadtimes),
            deadsum / (len(threadids) * total_t) * 100.0,
        )
    )
    print()

sys.exit(0)
diff --git a/examples/analyse_threadpool_tasks.py b/tools/task_plots/analyse_threadpool_tasks.py
similarity index 53%
rename from examples/analyse_threadpool_tasks.py
rename to tools/task_plots/analyse_threadpool_tasks.py
index 609af363b4110e010d6714bef6862d40e5acb278..af8d88dc1d4dc319fe7506d604e550de22a55a81 100755
--- a/examples/analyse_threadpool_tasks.py
+++ b/tools/task_plots/analyse_threadpool_tasks.py
@@ -29,6 +29,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
 import matplotlib
+
 matplotlib.use("Agg")
 import matplotlib.collections as collections
 import matplotlib.ticker as plticker
@@ -40,23 +41,28 @@ import argparse
 parser = argparse.ArgumentParser(description="Analyse task dumps")
 
 parser.add_argument("input", help="Threadpool data file (-y output)")
-parser.add_argument("-v", "--verbose", dest="verbose",
-                    help="Verbose output (default: False)",
-                    default=False, action="store_true")
+parser.add_argument(
+    "-v",
+    "--verbose",
+    dest="verbose",
+    help="Verbose output (default: False)",
+    default=False,
+    action="store_true",
+)
 
 args = parser.parse_args()
 infile = args.input
 
 #  Read header. First two lines.
 with open(infile) as infid:
-    head = [next(infid) for x in xrange(2)]
+    head = [next(infid) for x in range(2)]
 header = head[1][2:].strip()
 header = eval(header)
-nthread = int(header['num_threads']) + 1
-CPU_CLOCK = float(header['cpufreq']) / 1000.0
-print "Number of threads: ", nthread - 1
+nthread = int(header["num_threads"]) + 1
+CPU_CLOCK = float(header["cpufreq"]) / 1000.0
+print("Number of threads: ", nthread - 1)
 if args.verbose:
-    print "CPU frequency:", CPU_CLOCK * 1000.0
+    print("CPU frequency:", CPU_CLOCK * 1000.0)
 
 #  Read input.
 data = pl.genfromtxt(infile, dtype=None, delimiter=" ")
@@ -71,7 +77,7 @@ for i in data:
     if i[0] != "#":
         funcs.append(i[0].replace("_mapper", ""))
         if i[1] < 0:
-            threads.append(nthread-1)
+            threads.append(nthread - 1)
         else:
             threads.append(i[1])
         chunks.append(i[2])
@@ -88,9 +94,9 @@ tic_step = min(tics)
 toc_step = max(tocs)
 
 #  Calculate the time range.
-total_t = (toc_step - tic_step)/ CPU_CLOCK
-print "# Data range: ", total_t, "ms"
-print
+total_t = (toc_step - tic_step) / CPU_CLOCK
+print("# Data range: ", total_t, "ms")
+print()
 
 #  Correct times to relative millisecs.
 start_t = float(tic_step)
@@ -104,7 +110,7 @@ for i in range(nthread):
 
 #  Gather into by thread data.
 for i in range(len(tics)):
-    tasks[threads[i]].append([tics[i],tocs[i],funcs[i]])
+    tasks[threads[i]].append([tics[i], tocs[i], funcs[i]])
 
 #  Don't actually process the fake thread.
 nthread = nthread - 1
@@ -117,11 +123,13 @@ for i in range(nthread):
         threadids.append(i)
 
 #  Times per task.
-print "# Task times:"
-print "# -----------"
-print "# {0:<31s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}"\
-      .format("type/subtype", "count","minimum", "maximum",
-              "sum", "mean", "percent")
+print("# Task times:")
+print("# -----------")
+print(
+    "# {0:<31s}: {1:>7s} {2:>9s} {3:>9s} {4:>9s} {5:>9s} {6:>9s}".format(
+        "type/subtype", "count", "minimum", "maximum", "sum", "mean", "percent"
+    )
+)
 alltasktimes = {}
 sidtimes = {}
 for i in threadids:
@@ -137,74 +145,116 @@ for i in threadids:
             alltasktimes[key] = []
         alltasktimes[key].append(dt)
 
-    print "# Thread : ", i
+    print("# Thread : ", i)
     for key in sorted(tasktimes.keys()):
         taskmin = min(tasktimes[key])
         taskmax = max(tasktimes[key])
         tasksum = sum(tasktimes[key])
-        print "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-              .format(key, len(tasktimes[key]), taskmin, taskmax, tasksum,
-                      tasksum / len(tasktimes[key]), tasksum / total_t * 100.0)
-    print
-
-print "# All threads : "
+        print(
+            "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
+                key,
+                len(tasktimes[key]),
+                taskmin,
+                taskmax,
+                tasksum,
+                tasksum / len(tasktimes[key]),
+                tasksum / total_t * 100.0,
+            )
+        )
+    print()
+
+print("# All threads : ")
 for key in sorted(alltasktimes.keys()):
     taskmin = min(alltasktimes[key])
     taskmax = max(alltasktimes[key])
     tasksum = sum(alltasktimes[key])
-    print "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-          .format(key, len(alltasktimes[key]), taskmin, taskmax, tasksum,
-                  tasksum / len(alltasktimes[key]),
-                  tasksum / (len(threadids) * total_t) * 100.0)
-print
+    print(
+        "{0:33s}: {1:7d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
+            key,
+            len(alltasktimes[key]),
+            taskmin,
+            taskmax,
+            tasksum,
+            tasksum / len(alltasktimes[key]),
+            tasksum / (len(threadids) * total_t) * 100.0,
+        )
+    )
+print()
 
 #  Dead times.
-print "# Times not in tasks (deadtimes)"
-print "# ------------------------------"
-print "# Time before first task:"
-print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+print("# Times not in tasks (deadtimes)")
+print("# ------------------------------")
+print("# Time before first task:")
+print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
 predeadtimes = []
 for i in threadids:
     predeadtime = tasks[i][0][0]
-    print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-          .format(i, predeadtime, predeadtime / total_t * 100.0)
+    print(
+        "thread {0:2d}: {1:9.4f} {2:9.4f}".format(
+            i, predeadtime, predeadtime / total_t * 100.0
+        )
+    )
     predeadtimes.append(predeadtime)
 
 predeadmin = min(predeadtimes)
 predeadmax = max(predeadtimes)
 predeadsum = sum(predeadtimes)
-print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-      .format(len(predeadtimes), predeadmin, predeadmax, predeadsum,
-              predeadsum / len(predeadtimes),
-              predeadsum / (len(threadids) * total_t ) * 100.0)
-print
-
-print "# Time after last task:"
-print "# no.    : {0:>9s} {1:>9s}".format("value", "percent")
+print(
+    "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
+        "count", "minimum", "maximum", "sum", "mean", "percent"
+    )
+)
+print(
+    "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
+        len(predeadtimes),
+        predeadmin,
+        predeadmax,
+        predeadsum,
+        predeadsum / len(predeadtimes),
+        predeadsum / (len(threadids) * total_t) * 100.0,
+    )
+)
+print()
+
+print("# Time after last task:")
+print("# no.    : {0:>9s} {1:>9s}".format("value", "percent"))
 postdeadtimes = []
 for i in threadids:
     postdeadtime = total_t - tasks[i][-1][1]
-    print "thread {0:2d}: {1:9.4f} {2:9.4f}"\
-          .format(i, postdeadtime, postdeadtime / total_t * 100.0)
+    print(
+        "thread {0:2d}: {1:9.4f} {2:9.4f}".format(
+            i, postdeadtime, postdeadtime / total_t * 100.0
+        )
+    )
     postdeadtimes.append(postdeadtime)
 
 postdeadmin = min(postdeadtimes)
 postdeadmax = max(postdeadtimes)
 postdeadsum = sum(postdeadtimes)
-print "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-      .format(len(postdeadtimes), postdeadmin, postdeadmax, postdeadsum,
-              postdeadsum / len(postdeadtimes),
-              postdeadsum / (len(threadids) * total_t ) * 100.0)
-print
+print(
+    "#        : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
+        "count", "minimum", "maximum", "sum", "mean", "percent"
+    )
+)
+print(
+    "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
+        len(postdeadtimes),
+        postdeadmin,
+        postdeadmax,
+        postdeadsum,
+        postdeadsum / len(postdeadtimes),
+        postdeadsum / (len(threadids) * total_t) * 100.0,
+    )
+)
+print()
 
 #  Time in threadpool, i.e. from first to last tasks.
-print "# Time between tasks (threadpool deadtime):"
-print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
+print("# Time between tasks (threadpool deadtime):")
+print(
+    "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
+        "count", "minimum", "maximum", "sum", "mean", "percent"
+    )
+)
 threadpooldeadtimes = []
 for i in threadids:
     deadtimes = []
@@ -224,24 +274,41 @@ for i in threadids:
     deadmin = min(deadtimes)
     deadmax = max(deadtimes)
     deadsum = sum(deadtimes)
-    print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-          .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
+    print(
+        "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
+            i,
+            len(deadtimes),
+            deadmin,
+            deadmax,
+            deadsum,
+            deadsum / len(deadtimes),
+            deadsum / total_t * 100.0,
+        )
+    )
     threadpooldeadtimes.extend(deadtimes)
 
 deadmin = min(threadpooldeadtimes)
 deadmax = max(threadpooldeadtimes)
 deadsum = sum(threadpooldeadtimes)
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-      .format(len(threadpooldeadtimes), deadmin, deadmax, deadsum,
-              deadsum / len(threadpooldeadtimes),
-              deadsum / (len(threadids) * total_t ) * 100.0)
-print
+print(
+    "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
+        len(threadpooldeadtimes),
+        deadmin,
+        deadmax,
+        deadsum,
+        deadsum / len(threadpooldeadtimes),
+        deadsum / (len(threadids) * total_t) * 100.0,
+    )
+)
+print()
 
 #  All times in step.
-print "# All deadtimes:"
-print "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}"\
-      .format("count", "minimum", "maximum", "sum", "mean", "percent")
+print("# All deadtimes:")
+print(
+    "# no.    : {0:>9s} {1:>9s} {2:>9s} {3:>9s} {4:>9s} {5:>9s}".format(
+        "count", "minimum", "maximum", "sum", "mean", "percent"
+    )
+)
 alldeadtimes = []
 for i in threadids:
     deadtimes = []
@@ -256,18 +323,32 @@ for i in threadids:
     deadmin = min(deadtimes)
     deadmax = max(deadtimes)
     deadsum = sum(deadtimes)
-    print "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}"\
-          .format(i, len(deadtimes), deadmin, deadmax, deadsum,
-                  deadsum / len(deadtimes), deadsum / total_t * 100.0)
+    print(
+        "thread {0:2d}: {1:9d} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.4f} {6:9.2f}".format(
+            i,
+            len(deadtimes),
+            deadmin,
+            deadmax,
+            deadsum,
+            deadsum / len(deadtimes),
+            deadsum / total_t * 100.0,
+        )
+    )
     alldeadtimes.extend(deadtimes)
 
 deadmin = min(alldeadtimes)
 deadmax = max(alldeadtimes)
 deadsum = sum(alldeadtimes)
-print "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}"\
-      .format(len(alldeadtimes), deadmin, deadmax, deadsum,
-              deadsum / len(alldeadtimes),
-              deadsum / (len(threadids) * total_t ) * 100.0)
-print
+print(
+    "all      : {0:9d} {1:9.4f} {2:9.4f} {3:9.4f} {4:9.4f} {5:9.2f}".format(
+        len(alldeadtimes),
+        deadmin,
+        deadmax,
+        deadsum,
+        deadsum / len(alldeadtimes),
+        deadsum / (len(threadids) * total_t) * 100.0,
+    )
+)
+print()
 
 sys.exit(0)
diff --git a/examples/plot_tasks.py b/tools/task_plots/plot_tasks.py
similarity index 53%
rename from examples/plot_tasks.py
rename to tools/task_plots/plot_tasks.py
index 9eecf6f4ca15148f544ea48cb65c97cd3802a48d..54f34b2f828895d894b84253e366173827c03158 100755
--- a/examples/plot_tasks.py
+++ b/tools/task_plots/plot_tasks.py
@@ -53,30 +53,67 @@ parser = argparse.ArgumentParser(description="Plot task graphs")
 
 parser.add_argument("input", help="Thread data file (-y output)")
 parser.add_argument("outbase", help="Base name for output graphic files (PNG)")
-parser.add_argument("-l", "--limit", dest="limit",
-                    help="Upper time limit in millisecs (def: depends on data)",
-                    default=0, type=float)
-parser.add_argument("-e", "--expand", dest="expand",
-                    help="Thread expansion factor (def: 1)",
-                    default=1, type=int)
-parser.add_argument("--height", dest="height",
-                    help="Height of plot in inches (def: 4)",
-                    default=4., type=float)
-parser.add_argument("--width", dest="width",
-                    help="Width of plot in inches (def: 16)",
-                    default=16., type=float)
-parser.add_argument("--nolegend", dest="nolegend",
-                    help="Whether to show the legend (def: False)",
-                    default=False, action="store_true")
-parser.add_argument("-v", "--verbose", dest="verbose",
-                    help="Show colour assignments and other details (def: False)",
-                    default=False, action="store_true")
-parser.add_argument("-r", "--ranks", dest="ranks",
-                    help="Comma delimited list of ranks to process, if MPI in effect",
-                    default=None, type=str)
-parser.add_argument("-m", "--mintic", dest="mintic",
-                    help="Value of the smallest tic (def: least in input file)",
-                    default=-1, type=int)
+parser.add_argument(
+    "-l",
+    "--limit",
+    dest="limit",
+    help="Upper time limit in millisecs (def: depends on data)",
+    default=0,
+    type=float,
+)
+parser.add_argument(
+    "-e",
+    "--expand",
+    dest="expand",
+    help="Thread expansion factor (def: 1)",
+    default=1,
+    type=int,
+)
+parser.add_argument(
+    "--height",
+    dest="height",
+    help="Height of plot in inches (def: 4)",
+    default=4.0,
+    type=float,
+)
+parser.add_argument(
+    "--width",
+    dest="width",
+    help="Width of plot in inches (def: 16)",
+    default=16.0,
+    type=float,
+)
+parser.add_argument(
+    "--nolegend",
+    dest="nolegend",
+    help="Whether to show the legend (def: False)",
+    default=False,
+    action="store_true",
+)
+parser.add_argument(
+    "-v",
+    "--verbose",
+    dest="verbose",
+    help="Show colour assignments and other details (def: False)",
+    default=False,
+    action="store_true",
+)
+parser.add_argument(
+    "-r",
+    "--ranks",
+    dest="ranks",
+    help="Comma delimited list of ranks to process, if MPI in effect",
+    default=None,
+    type=str,
+)
+parser.add_argument(
+    "-m",
+    "--mintic",
+    dest="mintic",
+    help="Value of the smallest tic (def: least in input file)",
+    default=-1,
+    type=int,
+)
 
 args = parser.parse_args()
 infile = args.input
@@ -85,55 +122,175 @@ delta_t = args.limit
 expand = args.expand
 mintic = args.mintic
 if args.ranks != None:
-    ranks = [int(item) for item in args.ranks.split(',')]
+    ranks = [int(item) for item in args.ranks.split(",")]
 else:
     ranks = None
 
 #  Basic plot configuration.
-PLOT_PARAMS = {"axes.labelsize": 10,
-               "axes.titlesize": 10,
-               "font.size": 12,
-               "legend.fontsize": 12,
-               "xtick.labelsize": 10,
-               "ytick.labelsize": 10,
-               "figure.figsize" : (args.width, args.height),
-               "figure.subplot.left" : 0.03,
-               "figure.subplot.right" : 0.995,
-               "figure.subplot.bottom" : 0.1,
-               "figure.subplot.top" : 0.99,
-               "figure.subplot.wspace" : 0.,
-               "figure.subplot.hspace" : 0.,
-               "lines.markersize" : 6,
-               "lines.linewidth" : 3.
-               }
+PLOT_PARAMS = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "figure.figsize": (args.width, args.height),
+    "figure.subplot.left": 0.03,
+    "figure.subplot.right": 0.995,
+    "figure.subplot.bottom": 0.1,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.0,
+    "figure.subplot.hspace": 0.0,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+}
 pl.rcParams.update(PLOT_PARAMS)
 
 #  Tasks and subtypes. Indexed as in tasks.h.
-TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
-             "init_grav", "init_grav_out", "ghost_in", "ghost", "ghost_out", "extra_ghost", "drift_part", "drift_gpart",
-             "end_force", "kick1", "kick2", "timestep", "send", "recv", "grav_long_range", "grav_mm", "grav_down_in", 
-             "grav_down", "grav_mesh", "cooling", "sourceterms", "count"]
-
-SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
-            "tend", "xv", "rho", "gpart", "multipole", "spart", "count"]
+TASKTYPES = [
+    "none",
+    "sort",
+    "self",
+    "pair",
+    "sub_self",
+    "sub_pair",
+    "init_grav",
+    "init_grav_out",
+    "ghost_in",
+    "ghost",
+    "ghost_out",
+    "extra_ghost",
+    "drift_part",
+    "drift_spart",
+    "drift_gpart",
+    "drift_gpart_out",
+    "hydro_end_force",
+    "kick1",
+    "kick2",
+    "timestep",
+    "timestep_limiter",
+    "send",
+    "recv",
+    "grav_long_range",
+    "grav_mm",
+    "grav_down_in",
+    "grav_down",
+    "grav_mesh",
+    "grav_end_force",
+    "cooling",
+    "star_formation",
+    "logger",
+    "stars_in",
+    "stars_out",
+    "stars_ghost_in",
+    "stars_ghost",
+    "stars_ghost_out",
+    "stars_sort",
+    "count",
+]
+
+SUBTYPES = [
+    "none",
+    "density",
+    "gradient",
+    "force",
+    "limiter",
+    "grav",
+    "external_grav",
+    "tend",
+    "xv",
+    "rho",
+    "gpart",
+    "multipole",
+    "spart",
+    "stars_density",
+    "stars_feedback",
+    "count",
+]
 
 #  Task/subtypes of interest.
-FULLTYPES = ["self/force", "self/density", "self/grav", "sub_self/force",
-             "sub_self/density", "pair/force", "pair/density", "pair/grav",
-             "sub_pair/force",
-             "sub_pair/density", "recv/xv", "send/xv", "recv/rho", "send/rho",
-             "recv/tend", "send/tend", "recv/gpart", "send/gpart"]
+FULLTYPES = [
+    "self/limiter",
+    "self/force",
+    "self/gradient",
+    "self/density",
+    "self/grav",
+    "sub_self/limiter",
+    "sub_self/force",
+    "sub_self/gradient",
+    "sub_self/density",
+    "pair/limiter",
+    "pair/force",
+    "pair/gradient",
+    "pair/density",
+    "pair/grav",
+    "sub_pair/limiter",
+    "sub_pair/force",
+    "sub_pair/gradient",
+    "sub_pair/density",
+    "recv/xv",
+    "send/xv",
+    "recv/rho",
+    "send/rho",
+    "recv/tend",
+    "send/tend",
+    "recv/gpart",
+    "send/gpart",
+    "recv/spart",
+    "send/spart",
+    "self/stars_density",
+    "pair/stars_density",
+    "sub_self/stars_density",
+    "sub_pair/stars_density",
+    "self/stars_feedback",
+    "pair/stars_feedback",
+    "sub_self/stars_feedback",
+    "sub_pair/stars_feedback",
+]
 
 #  A number of colours for the various types. Recycled when there are
 #  more task types than colours...
-colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
-           "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
-           "brown", "purple", "moccasin", "olivedrab", "chartreuse",
-           "darksage", "darkgreen", "green", "mediumseagreen",
-           "mediumaquamarine", "darkslategrey", "mediumturquoise",
-           "black", "cadetblue", "skyblue", "red", "slategray", "gold",
-           "slateblue", "blueviolet", "mediumorchid", "firebrick",
-           "magenta", "hotpink", "pink", "orange", "lightgreen"]
+colours = [
+    "cyan",
+    "lightgray",
+    "darkblue",
+    "yellow",
+    "tan",
+    "dodgerblue",
+    "sienna",
+    "aquamarine",
+    "bisque",
+    "blue",
+    "green",
+    "lightgreen",
+    "brown",
+    "purple",
+    "moccasin",
+    "olivedrab",
+    "chartreuse",
+    "olive",
+    "darkgreen",
+    "green",
+    "mediumseagreen",
+    "mediumaquamarine",
+    "darkslategrey",
+    "mediumturquoise",
+    "black",
+    "cadetblue",
+    "skyblue",
+    "red",
+    "slategray",
+    "gold",
+    "slateblue",
+    "blueviolet",
+    "mediumorchid",
+    "firebrick",
+    "magenta",
+    "hotpink",
+    "pink",
+    "orange",
+    "lightgreen",
+]
 maxcolours = len(colours)
 
 #  Set colours of task/subtype.
@@ -154,23 +311,23 @@ for task in SUBTYPES:
 
 #  For fiddling with colours...
 if args.verbose:
-    print "#Selected colours:"
+    print("#Selected colours:")
     for task in sorted(TASKCOLOURS.keys()):
-        print "# " + task + ": " + TASKCOLOURS[task]
+        print("# " + task + ": " + TASKCOLOURS[task])
     for task in sorted(SUBCOLOURS.keys()):
-        print "# " + task + ": " + SUBCOLOURS[task]
+        print(("# " + task + ": " + SUBCOLOURS[task]))
 
 #  Read input.
-data = pl.loadtxt( infile )
+data = pl.loadtxt(infile)
 
 #  Do we have an MPI file?
-full_step = data[0,:]
+full_step = data[0, :]
 if full_step.size == 13:
-    print "# MPI mode"
+    print("# MPI mode")
     mpimode = True
     if ranks == None:
-        ranks = range(int(max(data[:,0])) + 1)
-    print "# Number of ranks:", len(ranks)
+        ranks = list(range(int(max(data[:, 0])) + 1))
+    print("# Number of ranks:", len(ranks))
     rankcol = 0
     threadscol = 1
     taskcol = 2
@@ -178,7 +335,7 @@ if full_step.size == 13:
     ticcol = 5
     toccol = 6
 else:
-    print "# non MPI mode"
+    print("# non MPI mode")
     ranks = [0]
     mpimode = False
     rankcol = -1
@@ -191,14 +348,14 @@ else:
 #  Get CPU_CLOCK to convert ticks into milliseconds.
 CPU_CLOCK = float(full_step[-1]) / 1000.0
 if args.verbose:
-    print "# CPU frequency:", CPU_CLOCK * 1000.0
+    print("# CPU frequency:", CPU_CLOCK * 1000.0)
 
-nthread = int(max(data[:,threadscol])) + 1
-print "# Number of threads:", nthread
+nthread = int(max(data[:, threadscol])) + 1
+print("# Number of threads:", nthread)
 
 # Avoid start and end times of zero.
-sdata = data[data[:,ticcol] != 0]
-sdata = sdata[sdata[:,toccol] != 0]
+sdata = data[data[:, ticcol] != 0]
+sdata = sdata[sdata[:, toccol] != 0]
 
 # Each rank can have different clocks (compute node), but we want to use the
 # same delta times range for comparisons, so we suck it up and take the hit of
@@ -207,8 +364,8 @@ delta_t = delta_t * CPU_CLOCK
 if delta_t == 0:
     for rank in ranks:
         if mpimode:
-            data = sdata[sdata[:,rankcol] == rank]
-            full_step = data[0,:]
+            data = sdata[sdata[:, rankcol] == rank]
+            full_step = data[0, :]
 
         #  Start and end times for this rank. Can be changed using the mintic
         #  option. This moves our zero time to other time. Useful for
@@ -221,28 +378,31 @@ if delta_t == 0:
         dt = toc_step - tic_step
         if dt > delta_t:
             delta_t = dt
-    print "# Data range: ", delta_t / CPU_CLOCK, "ms"
+    print("# Data range: ", delta_t / CPU_CLOCK, "ms")
 
 # Once more doing the real gather and plots this time.
 for rank in ranks:
-    print "# Processing rank: ", rank
+    print(("# Processing rank: ", rank))
     if mpimode:
-        data = sdata[sdata[:,rankcol] == rank]
-        full_step = data[0,:]
+        data = sdata[sdata[:, rankcol] == rank]
+        full_step = data[0, :]
     tic_step = int(full_step[ticcol])
     toc_step = int(full_step[toccol])
-    print "# Min tic = ", tic_step
-    data = data[1:,:]
+    print("# Min tic = ", tic_step)
+    data = data[1:, :]
     typesseen = []
     nethread = 0
 
     #  Dummy image for ranks that have no tasks.
     if data.size == 0:
-        print "# Rank ", rank, " has no tasks"
+        print("# Rank ", rank, " has no tasks")
         fig = pl.figure()
-        ax = fig.add_subplot(1,1,1)
+        ax = fig.add_subplot(1, 1, 1)
         ax.set_xlim(-delta_t * 0.01 / CPU_CLOCK, delta_t * 1.01 / CPU_CLOCK)
-        ax.set_ylim(0, nthread*expand)
+        if nthread == 0:
+            ax.set_ylim(0, expand)
+        else:
+            ax.set_ylim(0, nthread * expand)
         if mintic < 0:
             start_t = tic_step
         else:
@@ -254,13 +414,13 @@ for rank in ranks:
             start_t = float(tic_step)
         else:
             start_t = float(mintic)
-        data[:,ticcol] -= start_t
-        data[:,toccol] -= start_t
+        data[:, ticcol] -= start_t
+        data[:, toccol] -= start_t
         end_t = (toc_step - start_t) / CPU_CLOCK
 
         tasks = {}
         tasks[-1] = []
-        for i in range(nthread*expand):
+        for i in range(nthread * expand):
             tasks[i] = []
 
         # Counters for each thread when expanding.
@@ -278,15 +438,20 @@ for rank in ranks:
             thread = ethread
 
             tasks[thread].append({})
-            tasktype = TASKTYPES[int(data[line,taskcol])]
-            subtype = SUBTYPES[int(data[line,subtaskcol])]
+            tasktype = TASKTYPES[int(data[line, taskcol])]
+            subtype = SUBTYPES[int(data[line, subtaskcol])]
             tasks[thread][-1]["type"] = tasktype
             tasks[thread][-1]["subtype"] = subtype
-            tic = int(data[line,ticcol]) / CPU_CLOCK
-            toc = int(data[line,toccol]) / CPU_CLOCK
+            tic = int(data[line, ticcol]) / CPU_CLOCK
+            toc = int(data[line, toccol]) / CPU_CLOCK
             tasks[thread][-1]["tic"] = tic
             tasks[thread][-1]["toc"] = toc
-            if "self" in tasktype or "pair" in tasktype or "recv" in tasktype or "send" in tasktype:
+            if (
+                "self" in tasktype
+                or "pair" in tasktype
+                or "recv" in tasktype
+                or "send" in tasktype
+            ):
                 fulltype = tasktype + "/" + subtype
                 if fulltype in SUBCOLOURS:
                     tasks[thread][-1]["colour"] = SUBCOLOURS[fulltype]
@@ -300,9 +465,9 @@ for rank in ranks:
 
         typesseen = []
         fig = pl.figure()
-        ax = fig.add_subplot(1,1,1)
+        ax = fig.add_subplot(1, 1, 1)
         ax.set_xlim(-delta_t * 0.01 / CPU_CLOCK, delta_t * 1.01 / CPU_CLOCK)
-        ax.set_ylim(0, nethread)
+        ax.set_ylim(0.5, nethread + 1.0)
         for i in range(nethread):
 
             #  Collect ranges and colours into arrays.
@@ -324,46 +489,47 @@ for rank in ranks:
                     typesseen.append(qtask)
 
             #  Now plot.
-            ax.broken_barh(tictocs, [i+0.05,0.90], facecolors = colours, linewidth=0)
-
+            ax.broken_barh(tictocs, [i + 0.55, 0.9], facecolors=colours, linewidth=0)
 
     #  Legend and room for it.
-    nrow = len(typesseen) / 5
-    ax.fill_between([0, 0], nethread+0.5, nethread + nrow + 0.5, facecolor="white")
-    ax.set_ylim(0, nethread + 0.5)
+    nrow = len(typesseen) / 8
+    ax.fill_between([0, 0], nethread, nethread + nrow, facecolor="white")
     if data.size > 0 and not args.nolegend:
-        ax.fill_between([0, 0], nethread+0.5, nethread + nrow + 0.5, facecolor="white")
-        ax.set_ylim(0, nethread + 0.5)
-        ax.legend(loc=1, shadow=True, bbox_to_anchor=(0., 1.05 ,1., 0.2), mode="expand", ncol=5)
-        box = ax.get_position()
-        ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
+        ax.fill_between([0, 0], nethread, nethread + nrow, facecolor="white")
+        ax.legend(
+            loc="lower left",
+            shadow=True,
+            bbox_to_anchor=(0.0, 1.0, 1.0, 0.2),
+            mode="expand",
+            ncol=8,
+        )
 
     # Start and end of time-step
     if mintic < 0:
-        ax.plot([0, 0], [0, nethread + nrow + 1], 'k--', linewidth=1)
+        ax.plot([0, 0], [0, nethread + nrow + 1], "k--", linewidth=1)
     else:
         real_start = tic_step - mintic
-        ax.plot([real_start, real_start], [0, nethread + nrow + 1], 'k--', linewidth=1)
-    ax.plot([end_t, end_t], [0, nethread + nrow + 1], 'k--', linewidth=1)
+        ax.plot([real_start, real_start], [0, nethread + nrow + 1], "k--", linewidth=1)
+    ax.plot([end_t, end_t], [0, nethread + nrow + 1], "k--", linewidth=1)
 
     ax.set_xlabel("Wall clock time [ms]")
 
     if expand == 1:
-        ax.set_ylabel("Thread ID" )
+        ax.set_ylabel("Thread ID")
     else:
-        ax.set_ylabel("Thread ID * " + str(expand) )
-    ax.set_yticks(pl.array(range(nethread)), True)
+        ax.set_ylabel("Thread ID * " + str(expand))
+    ax.set_yticks(pl.array(list(range(nethread))), True)
 
     loc = plticker.MultipleLocator(base=expand)
     ax.yaxis.set_major_locator(loc)
-    ax.grid(True, which='major', axis="y", linestyle="-")
+    ax.grid(True, which="major", axis="y", linestyle="-")
 
     pl.show()
     if mpimode:
         outpng = outbase + str(rank) + ".png"
     else:
         outpng = outbase + ".png"
-    pl.savefig(outpng)
-    print "Graphics done, output written to", outpng
+    pl.savefig(outpng, bbox_inches="tight")
+    print("Graphics done, output written to", outpng)
 
 sys.exit(0)
diff --git a/examples/plot_threadpool.py b/tools/task_plots/plot_threadpool.py
similarity index 59%
rename from examples/plot_threadpool.py
rename to tools/task_plots/plot_threadpool.py
index bbcc8c23e4c4e5ed6b93055d7460d793f43d91fb..2e5521c901d0571665c6c6d7ec3297b0e9e60552 100755
--- a/examples/plot_threadpool.py
+++ b/tools/task_plots/plot_threadpool.py
@@ -31,6 +31,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
 import matplotlib
+
 matplotlib.use("Agg")
 import matplotlib.collections as collections
 import matplotlib.ticker as plticker
@@ -43,27 +44,59 @@ parser = argparse.ArgumentParser(description="Plot threadpool function graphs")
 
 parser.add_argument("input", help="Threadpool data file (-Y output)")
 parser.add_argument("outpng", help="Name for output graphic file (PNG)")
-parser.add_argument("-l", "--limit", dest="limit",
-                    help="Upper time limit in millisecs (def: depends on data)",
-                    default=0, type=float)
-parser.add_argument("-e", "--expand", dest="expand",
-                    help="Thread expansion factor (def: 1)",
-                    default=1, type=int)
-parser.add_argument("--height", dest="height",
-                    help="Height of plot in inches (def: 4)",
-                    default=4., type=float)
-parser.add_argument("--width", dest="width",
-                    help="Width of plot in inches (def: 16)",
-                    default=16., type=float)
-parser.add_argument("--nolegend", dest="nolegend",
-                    help="Whether to show the legend (def: False)",
-                    default=False, action="store_true")
-parser.add_argument("-v", "--verbose", dest="verbose",
-                    help="Show colour assignments and other details (def: False)",
-                    default=False, action="store_true")
-parser.add_argument("-m", "--mintic", dest="mintic",
-                    help="Value of the smallest tic (def: least in input file)",
-                    default=-1, type=int)
+parser.add_argument(
+    "-l",
+    "--limit",
+    dest="limit",
+    help="Upper time limit in millisecs (def: depends on data)",
+    default=0,
+    type=float,
+)
+parser.add_argument(
+    "-e",
+    "--expand",
+    dest="expand",
+    help="Thread expansion factor (def: 1)",
+    default=1,
+    type=int,
+)
+parser.add_argument(
+    "--height",
+    dest="height",
+    help="Height of plot in inches (def: 4)",
+    default=4.0,
+    type=float,
+)
+parser.add_argument(
+    "--width",
+    dest="width",
+    help="Width of plot in inches (def: 16)",
+    default=16.0,
+    type=float,
+)
+parser.add_argument(
+    "--nolegend",
+    dest="nolegend",
+    help="Whether to show the legend (def: False)",
+    default=False,
+    action="store_true",
+)
+parser.add_argument(
+    "-v",
+    "--verbose",
+    dest="verbose",
+    help="Show colour assignments and other details (def: False)",
+    default=False,
+    action="store_true",
+)
+parser.add_argument(
+    "-m",
+    "--mintic",
+    dest="mintic",
+    help="Value of the smallest tic (def: least in input file)",
+    default=-1,
+    type=int,
+)
 
 args = parser.parse_args()
 infile = args.input
@@ -73,46 +106,80 @@ expand = args.expand
 mintic = args.mintic
 
 #  Basic plot configuration.
-PLOT_PARAMS = {"axes.labelsize": 10,
-               "axes.titlesize": 10,
-               "font.size": 12,
-               "legend.fontsize": 12,
-               "xtick.labelsize": 10,
-               "ytick.labelsize": 10,
-               "figure.figsize" : (args.width, args.height),
-               "figure.subplot.left" : 0.03,
-               "figure.subplot.right" : 0.995,
-               "figure.subplot.bottom" : 0.09,
-               "figure.subplot.top" : 0.99,
-               "figure.subplot.wspace" : 0.,
-               "figure.subplot.hspace" : 0.,
-               "lines.markersize" : 6,
-               "lines.linewidth" : 3.
-               }
+PLOT_PARAMS = {
+    "axes.labelsize": 10,
+    "axes.titlesize": 10,
+    "font.size": 12,
+    "legend.fontsize": 12,
+    "xtick.labelsize": 10,
+    "ytick.labelsize": 10,
+    "figure.figsize": (args.width, args.height),
+    "figure.subplot.left": 0.03,
+    "figure.subplot.right": 0.995,
+    "figure.subplot.bottom": 0.09,
+    "figure.subplot.top": 0.99,
+    "figure.subplot.wspace": 0.0,
+    "figure.subplot.hspace": 0.0,
+    "lines.markersize": 6,
+    "lines.linewidth": 3.0,
+}
 pl.rcParams.update(PLOT_PARAMS)
 
 #  A number of colours for the various types. Recycled when there are
 #  more task types than colours...
-colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
-           "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
-           "brown", "purple", "moccasin", "olivedrab", "chartreuse",
-           "darksage", "darkgreen", "green", "mediumseagreen",
-           "mediumaquamarine", "darkslategrey", "mediumturquoise",
-           "black", "cadetblue", "skyblue", "red", "slategray", "gold",
-           "slateblue", "blueviolet", "mediumorchid", "firebrick",
-           "magenta", "hotpink", "pink", "orange", "lightgreen"]
+colours = [
+    "cyan",
+    "lightgray",
+    "darkblue",
+    "yellow",
+    "tan",
+    "dodgerblue",
+    "sienna",
+    "aquamarine",
+    "bisque",
+    "blue",
+    "green",
+    "lightgreen",
+    "brown",
+    "purple",
+    "moccasin",
+    "olivedrab",
+    "chartreuse",
+    "olive",
+    "darkgreen",
+    "green",
+    "mediumseagreen",
+    "mediumaquamarine",
+    "darkslategrey",
+    "mediumturquoise",
+    "black",
+    "cadetblue",
+    "skyblue",
+    "red",
+    "slategray",
+    "gold",
+    "slateblue",
+    "blueviolet",
+    "mediumorchid",
+    "firebrick",
+    "magenta",
+    "hotpink",
+    "pink",
+    "orange",
+    "lightgreen",
+]
 maxcolours = len(colours)
 
 #  Read header. First two lines.
 with open(infile) as infid:
-    head = [next(infid) for x in xrange(2)]
+    head = [next(infid) for x in range(2)]
 header = head[1][2:].strip()
 header = eval(header)
-nthread = int(header['num_threads']) + 1
-CPU_CLOCK = float(header['cpufreq']) / 1000.0
-print "Number of threads: ", nthread
+nthread = int(header["num_threads"]) + 1
+CPU_CLOCK = float(header["cpufreq"]) / 1000.0
+print("Number of threads: ", nthread)
 if args.verbose:
-    print "CPU frequency:", CPU_CLOCK * 1000.0
+    print("CPU frequency:", CPU_CLOCK * 1000.0)
 
 #  Read input.
 data = pl.genfromtxt(infile, dtype=None, delimiter=" ")
@@ -127,7 +194,7 @@ for i in data:
     if i[0] != "#":
         funcs.append(i[0].replace("_mapper", ""))
         if i[1] < 0:
-            threads.append(nthread-1)
+            threads.append(nthread - 1)
         else:
             threads.append(i[1])
         chunks.append(i[2])
@@ -143,7 +210,7 @@ chunks = pl.array(chunks)
 mintic_step = min(tics)
 tic_step = mintic_step
 toc_step = max(tocs)
-print "# Min tic = ", mintic_step
+print("# Min tic = ", mintic_step)
 if mintic > 0:
     tic_step = mintic
 
@@ -153,7 +220,7 @@ if delta_t == 0:
     dt = toc_step - tic_step
     if dt > delta_t:
         delta_t = dt
-    print "Data range: ", delta_t / CPU_CLOCK, "ms"
+    print("Data range: ", delta_t / CPU_CLOCK, "ms")
 
 #  Once more doing the real gather and plots this time.
 start_t = float(tic_step)
@@ -163,7 +230,7 @@ end_t = (toc_step - start_t) / CPU_CLOCK
 
 #  Get all "task" names and assign colours.
 TASKTYPES = pl.unique(funcs)
-print TASKTYPES
+print(TASKTYPES)
 
 #  Set colours of task/subtype.
 TASKCOLOURS = {}
@@ -174,15 +241,15 @@ for task in TASKTYPES:
 
 #  For fiddling with colours...
 if args.verbose:
-    print "#Selected colours:"
+    print("#Selected colours:")
     for task in sorted(TASKCOLOURS.keys()):
-        print "# " + task + ": " + TASKCOLOURS[task]
+        print("# " + task + ": " + TASKCOLOURS[task])
     for task in sorted(SUBCOLOURS.keys()):
-        print "# " + task + ": " + SUBCOLOURS[task]
+        print("# " + task + ": " + SUBCOLOURS[task])
 
 tasks = {}
 tasks[-1] = []
-for i in range(nthread*expand):
+for i in range(nthread * expand):
     tasks[i] = []
 
 #  Counters for each thread when expanding.
@@ -211,7 +278,7 @@ nthread = nthread * expand
 
 typesseen = []
 fig = pl.figure()
-ax = fig.add_subplot(1,1,1)
+ax = fig.add_subplot(1, 1, 1)
 ax.set_xlim(-delta_t * 0.01 / CPU_CLOCK, delta_t * 1.01 / CPU_CLOCK)
 ax.set_ylim(0, nthread)
 
@@ -222,7 +289,7 @@ j = 0
 for task in tasks[nthread - expand]:
     tictocs.append((task["tic"], task["toc"] - task["tic"]))
     colours.append(task["colour"])
-ax.broken_barh(tictocs, [0,(nthread-1)], facecolors = colours, linewidth=0, alpha=0.15)
+ax.broken_barh(tictocs, [0, (nthread - 1)], facecolors=colours, linewidth=0, alpha=0.15)
 
 # And we don't plot the fake thread.
 nthread = nthread - expand
@@ -243,36 +310,38 @@ for i in range(nthread):
             typesseen.append(qtask)
 
     #  Now plot.
-    ax.broken_barh(tictocs, [i+0.05,0.90], facecolors = colours, linewidth=0)
+    ax.broken_barh(tictocs, [i + 0.05, 0.90], facecolors=colours, linewidth=0)
 
 #  Legend and room for it.
 nrow = len(typesseen) / 5
 if not args.nolegend:
-    ax.fill_between([0, 0], nthread+0.5, nthread + nrow + 0.5, facecolor="white")
+    ax.fill_between([0, 0], nthread + 0.5, nthread + nrow + 0.5, facecolor="white")
     ax.set_ylim(0, nthread + 0.5)
-    ax.legend(loc=1, shadow=True, bbox_to_anchor=(0., 1.05 ,1., 0.2), mode="expand", ncol=5)
+    ax.legend(
+        loc=1, shadow=True, bbox_to_anchor=(0.0, 1.05, 1.0, 0.2), mode="expand", ncol=5
+    )
     box = ax.get_position()
-    ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
-    
+    ax.set_position([box.x0, box.y0, box.width, box.height * 0.8])
+
 # Start and end of time-step
-real_start_t = (mintic_step - tic_step)/ CPU_CLOCK
-ax.plot([real_start_t, real_start_t], [0, nthread + nrow + 1], 'k--', linewidth=1)
+real_start_t = (mintic_step - tic_step) / CPU_CLOCK
+ax.plot([real_start_t, real_start_t], [0, nthread + nrow + 1], "k--", linewidth=1)
 
-ax.plot([end_t, end_t], [0, nthread + nrow + 1], 'k--', linewidth=1)
+ax.plot([end_t, end_t], [0, nthread + nrow + 1], "k--", linewidth=1)
 
-ax.set_xlabel("Wall clock time [ms]", labelpad=0.)
+ax.set_xlabel("Wall clock time [ms]", labelpad=0.0)
 if expand == 1:
-    ax.set_ylabel("Thread ID", labelpad=0 )
+    ax.set_ylabel("Thread ID", labelpad=0)
 else:
-    ax.set_ylabel("Thread ID * " + str(expand), labelpad=0 )
-ax.set_yticks(pl.array(range(nthread)), True)
+    ax.set_ylabel("Thread ID * " + str(expand), labelpad=0)
+ax.set_yticks(pl.array(list(range(nthread))), True)
 
 loc = plticker.MultipleLocator(base=expand)
 ax.yaxis.set_major_locator(loc)
-ax.grid(True, which='major', axis="y", linestyle="-")
+ax.grid(True, which="major", axis="y", linestyle="-")
 
 pl.show()
 pl.savefig(outpng)
-print "Graphics done, output written to", outpng
+print("Graphics done, output written to", outpng)
 
 sys.exit(0)
diff --git a/examples/process_plot_tasks b/tools/task_plots/process_plot_tasks
similarity index 100%
rename from examples/process_plot_tasks
rename to tools/task_plots/process_plot_tasks
diff --git a/examples/process_plot_tasks_MPI b/tools/task_plots/process_plot_tasks_MPI
similarity index 89%
rename from examples/process_plot_tasks_MPI
rename to tools/task_plots/process_plot_tasks_MPI
index 22c9a106f52ca28244f9fef60839b1125474f14c..736aad05b98aea619f79e2b2114815c8e0fbaa1c 100755
--- a/examples/process_plot_tasks_MPI
+++ b/tools/task_plots/process_plot_tasks_MPI
@@ -87,10 +87,22 @@ echo $list | xargs -n 3 | while read f s g; do
 <ul style="list-style-type:none">
 <li>
 EOF
+
+    cat <<EOF3 > step${s}r.html
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<body>
+EOF3
+
     for i in $(seq 0 $nrank); do
-        cat <<EOF2 >> index.html
-<a href="step${s}r${i}.html"><img src="step${s}r${i}.png" width=400px/></a>
-EOF2
+
+        cat <<EOF >> index.html
+<a href="step${s}r.html"><img src="step${s}r${i}.png" width=400px/></a>
+EOF
+        cat <<EOF3 >> step${s}r.html
+<a href="step${s}r${i}.html"><img src="step${s}r${i}.png"/></a>
+EOF3
+
     cat <<EOF2 > step${s}r${i}.html
  <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
 <html>
@@ -104,7 +116,14 @@ cat <<EOF2 >> step${s}r${i}.html
 </body>
 </html>
 EOF2
+
     done
+
+cat <<EOF3 >> step${s}r.html
+</body>
+</html>
+EOF3
+
 cat <<EOF >> index.html
 </li>
 </ul>
diff --git a/examples/process_plot_taskthreadpools b/tools/task_plots/process_plot_taskthreadpools
similarity index 100%
rename from examples/process_plot_taskthreadpools
rename to tools/task_plots/process_plot_taskthreadpools
diff --git a/examples/process_plot_taskthreadpools_helper b/tools/task_plots/process_plot_taskthreadpools_helper
similarity index 100%
rename from examples/process_plot_taskthreadpools_helper
rename to tools/task_plots/process_plot_taskthreadpools_helper
diff --git a/examples/process_plot_threadpool b/tools/task_plots/process_plot_threadpool
similarity index 100%
rename from examples/process_plot_threadpool
rename to tools/task_plots/process_plot_threadpool