diff --git a/INSTALL.swift b/INSTALL.swift
index a07d5b24c2d8c75778e2a24d90f77724459ab61f..8e1635b0715426512503fd9dcde32f59a7ad1b62 100644
--- a/INSTALL.swift
+++ b/INSTALL.swift
@@ -83,39 +83,65 @@ SWIFT depends on a number of third party libraries that should be available
 before you can build it.
 
 
-HDF5: a HDF5 library (v. 1.8.x or higher) is required to read and write
-particle data. One of the commands "h5cc" or "h5pcc" should be
-available. If "h5pcc" is located them a parallel HDF5 built for the version
-of MPI located should be provided. If the command is not available then it
-can be located using the "--with-hfd5" configure option. The value should
-be the full path to the "h5cc" or "h5pcc" commands.
+ - HDF5: an HDF5 library (v. 1.8.x or higher) is required to read
+         and write particle data. One of the commands "h5cc" or
+         "h5pcc" should be available. If "h5pcc" is located then it
+         should provide a parallel HDF5 build for the version of MPI
+         that was located. If neither command is available on the
+         path, one can be selected using the "--with-hdf5" configure
+         option. The value should be the full path to the "h5cc" or
+         "h5pcc" command.
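+
+         For example, to point configure at a parallel HDF5 build
+         (the prefix "/opt/hdf5" below is only a placeholder for
+         your local installation):
+
+             ./configure --with-hdf5=/opt/hdf5/bin/h5pcc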
 
 
-MPI: an optional MPI library that fully supports MPI_THREAD_MULTIPLE.
-Before running configure the "mpirun" command should be available in the
-shell. If your command isn't called "mpirun" then define the "MPIRUN"
-environment variable, either in the shell or when running configure.
+ - MPI: to run on more than one node, an MPI library that fully
+        supports MPI_THREAD_MULTIPLE is required. Before running
+        configure the "mpirun" command should be available in the
+        shell. If your command isn't called "mpirun" then define the
+        "MPIRUN" environment variable, either in the shell or when
+        running configure.
 
-The MPI compiler can be controlled using the MPICC variable, much like
-the CC one. Use this when your MPI compiler has a none-standard name.
+        The MPI compiler can be controlled using the MPICC variable,
+        much like the CC one. Use this when your MPI compiler has a
+        non-standard name.
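+
+        For example, when the wrapper compiler and launcher have
+        vendor-specific names (the values below are only
+        illustrative):
+
+            MPICC=mpiicc MPIRUN=srun ./configure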
 
 
-METIS: a build of the METIS library can be optionally used to optimize the
-load between MPI nodes (requires an MPI library). This should be found in
-the standard installation directories, or pointed at using the
-"--with-metis" configuration option.  In this case the top-level
-installation directory of the METIS build should be given. Note to use
-METIS you should at least supply "--with-metis".
+ - libtool: the build system relies on libtool.
 
 
-libNUMA: a build of the NUMA library can be used to pin the threads to
-the physical core of the machine SWIFT is running on. This is not always
-necessary as the OS scheduler may do a good job at distributing the threads
-among the different cores on each computing node.
+                           Optional Dependencies
+                           =====================
 
 
-DOXYGEN: the doxygen library is required to create the SWIFT API
-documentation.
+ - METIS: a build of the METIS library can optionally be used to
+          optimize the load balance between MPI nodes (requires an
+          MPI library). This should be found in the standard
+          installation directories, or pointed at using the
+          "--with-metis" configuration option, in which case the
+          top-level installation directory of the METIS build should
+          be given. Note that to use METIS you should at least supply
+          "--with-metis".
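+
+          For example (the prefix "/opt/metis" below is only a
+          placeholder for the top-level METIS installation
+          directory):
+
+              ./configure --with-metis=/opt/metis
+
+          Passing "--with-metis" without a value picks up a METIS
+          build from the standard installation directories.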
+
+
+ - libNUMA: a build of the NUMA library can be used to pin the
+            threads to the physical cores of the machine SWIFT is
+            running on. This is not always necessary as the OS
+            scheduler may do a good job at distributing the threads
+            among the different cores on each computing node.
+
+
+ - TCMalloc: a build of the TCMalloc library (part of gperftools)
+             can be used to obtain faster allocations than the
+             standard C malloc function that is part of glibc. The
+             option "--with-tcmalloc" should be passed to the
+             configuration script to use it.
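+
+             For example (assuming the TCMalloc library is installed
+             in a standard location):
+
+                 ./configure --with-tcmalloc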
+
+
+ - gperftools: a build of gperftools can be used to profile the
+               code. The option "--with-profiler" needs to be passed
+               to the configuration script to use it.
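+
+               For example (assuming the gperftools profiler library
+               is installed in a standard location):
+
+                   ./configure --with-profiler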
+
+
+ - DOXYGEN: the doxygen tool is required to create the SWIFT API
+            documentation.
 
 
 
diff --git a/configure.ac b/configure.ac
index 977a86a03d5e4c82139dfd56d3f1ee7afe3ea6e8..f6e2ea0db8e9829b719a0190eea7a1d891bfbbd6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -355,7 +355,7 @@ fi
 AC_SUBST([TCMALLOC_LIBS])
 AM_CONDITIONAL([HAVETCMALLOC],[test -n "$TCMALLOC_LIBS"])
 
-#  Check for -lprofiler usually part of the gpreftools along with tcmalloc.
+#  Check for -lprofiler usually part of the gperftools along with tcmalloc.
 have_profiler="no"
 AC_ARG_WITH([profiler],
    [AS_HELP_STRING([--with-profiler],
diff --git a/src/engine.c b/src/engine.c
index 4a01f2d3a75037663457f330aac7d1c59df52ddc..1323cd7c7d30f5e24b5faee1f2fcb9c0ea85e592 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -121,17 +121,15 @@ void engine_addlink(struct engine *e, struct link **l, struct task *t) {
  *
  * @param e The #engine.
  * @param c The #cell.
- * @param super The super #cell.
  */
 void engine_make_hierarchical_tasks(struct engine *e, struct cell *c) {
 
   struct scheduler *s = &e->sched;
+  const int is_fixdt = (e->policy & engine_policy_fixdt);
+  const int is_hydro = (e->policy & engine_policy_hydro);
+  const int is_with_cooling = (e->policy & engine_policy_cooling);
+  const int is_with_sourceterms = (e->policy & engine_policy_sourceterms);
 
-  const int is_fixdt = (e->policy & engine_policy_fixdt) == engine_policy_fixdt;
-  const int is_with_cooling =
-      (e->policy & engine_policy_cooling) == engine_policy_cooling;
-  const int is_with_sourceterms =
-      (e->policy & engine_policy_sourceterms) == engine_policy_sourceterms;
 
   /* Are we in a super-cell ? */
   if (c->super == c) {
@@ -153,9 +151,9 @@ void engine_make_hierarchical_tasks(struct engine *e, struct cell *c) {
       }
 
       /* Generate the ghost task. */
-
-      c->ghost = scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0,
-                                   c, NULL, 0);
+      if (is_hydro)
+        c->ghost = scheduler_addtask(s, task_type_ghost, task_subtype_none, 0,
+                                     0, c, NULL, 0);
 
 #ifdef EXTRA_HYDRO_LOOP
       /* Generate the extra ghost task. */
@@ -164,9 +162,7 @@ void engine_make_hierarchical_tasks(struct engine *e, struct cell *c) {
                                            task_subtype_none, 0, 0, c, NULL, 0);
 #endif
 
-
       /* Cooling task */
-
       if (is_with_cooling)
         c->cooling = scheduler_addtask(s, task_type_cooling, task_subtype_none,
                                        0, 0, c, NULL, 0);
@@ -1793,14 +1789,7 @@ void engine_make_extra_hydroloop_tasks(struct engine *e) {
       }
 #endif
     }
-
-    /* External gravity tasks should depend on init and unlock the kick */
-    else if (t->type == task_type_grav_external) {
-      scheduler_addunlock(sched, t->ci->init, t);
-      scheduler_addunlock(sched, t, t->ci->kick);
-    }
     /* Cooling tasks should depend on kick and unlock sourceterms */
-
     else if (t->type == task_type_cooling) {
       scheduler_addunlock(sched, t->ci->kick, t);
     }
@@ -1879,7 +1868,8 @@ void engine_maketasks(struct engine *e) {
   if (e->policy & engine_policy_external_gravity)
     engine_make_external_gravity_tasks(e);
 
-  if (e->sched.nr_tasks == 0) error("No hydro or gravity tasks created.");
+  if (e->sched.nr_tasks == 0 && (s->nr_gparts > 0 || s->nr_parts > 0))
+    error("We have particles but no hydro or gravity tasks were created.");
 
   /* Split the tasks. */
   scheduler_splittasks(sched);
diff --git a/src/space.c b/src/space.c
index fc1f63cb6c07e7ce8a9ed1e8b4a54491471879f6..7f6e2880304939c96397563be299158817e2dfff 100644
--- a/src/space.c
+++ b/src/space.c
@@ -445,7 +445,7 @@ void space_rebuild(struct space *s, double cell_max, int verbose) {
   if ((ind = (int *)malloc(sizeof(int) * ind_size)) == NULL)
     error("Failed to allocate temporary particle indices.");
   if (ind_size > 0) space_parts_get_cell_index(s, ind, cells_top, verbose);
-  for (size_t i = 0; i < ind_size; ++i) cells_top[ind[i]].count++;
+  for (size_t i = 0; i < s->nr_parts; ++i) cells_top[ind[i]].count++;
 
   /* Run through the gravity particles and get their cell index. */
   const size_t gind_size = s->size_gparts;
@@ -453,7 +453,7 @@ void space_rebuild(struct space *s, double cell_max, int verbose) {
   if ((gind = (int *)malloc(sizeof(int) * gind_size)) == NULL)
     error("Failed to allocate temporary g-particle indices.");
   if (gind_size > 0) space_gparts_get_cell_index(s, gind, cells_top, verbose);
-  for (size_t i = 0; i < gind_size; ++i) cells_top[gind[i]].gcount++;
+  for (size_t i = 0; i < s->nr_gparts; ++i) cells_top[gind[i]].gcount++;
 
 #ifdef WITH_MPI
 
diff --git a/src/task.c b/src/task.c
index e58f80d76b5bdf99b37ddcfc7b480330a3d66e93..277739c2b60b3b6600cf360cb8a565e93b98bdd4 100644
--- a/src/task.c
+++ b/src/task.c
@@ -47,10 +47,10 @@
 
 /* Task type names. */
 const char *taskID_names[task_type_count] = {
-    "none",       "sort",    "self",          "pair",          "sub_self",
-    "sub_pair",   "init",    "ghost",         "extra_ghost",   "kick",
-    "kick_fixdt", "send",    "recv",          "grav_gather_m", "grav_fft",
-    "grav_mm",    "grav_up", "grav_external", "cooling",       "sourceterms"};
+    "none",       "sort",    "self",    "pair",          "sub_self",
+    "sub_pair",   "init",    "ghost",   "extra_ghost",   "kick",
+    "kick_fixdt", "send",    "recv",    "grav_gather_m", "grav_fft",
+    "grav_mm",    "grav_up", "cooling", "sourceterms"};
 
 const char *subtaskID_names[task_subtype_count] = {
     "none", "density", "gradient", "force", "grav", "external_grav", "tend"};