diff --git a/src/cell.c b/src/cell.c
index ee798b954504b0f26f9662ebc00fac75d0b364ce..767b55985bd9ce4a179289acab601455d0f37d54 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -2061,10 +2061,12 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
     struct task *t = l->t;
     struct cell *ci = t->ci;
     struct cell *cj = t->cj;
+    const int ci_active = cell_is_active(ci, e);
+    const int cj_active = (cj != NULL) ? cell_is_active(cj, e) : 0;
 
     /* Only activate tasks that involve a local active cell. */
-    if ((cell_is_active(ci, e) && ci->nodeID == engine_rank) ||
-        (cj != NULL && cell_is_active(cj, e) && cj->nodeID == engine_rank)) {
+    if ((ci_active && ci->nodeID == engine_rank) ||
+        (cj_active && cj->nodeID == engine_rank)) {
       scheduler_activate(s, t);
 
       /* Set the drifting flags */
@@ -2075,6 +2077,65 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         cell_activate_subcell_grav_tasks(t->ci, NULL, s);
       } else if (t->type == task_type_pair) {
         cell_activate_subcell_grav_tasks(t->ci, t->cj, s);
+      
+#ifdef WITH_MPI
+        /* Activate the send/recv tasks. */
+        if (ci->nodeID != engine_rank) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (cj_active) {
+            scheduler_activate(s, ci->recv_grav);
+            scheduler_activate(s, ci->recv_multipole);
+	  }
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (ci_active) scheduler_activate(s, ci->recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (ci_active) {
+
+	    scheduler_activate_send(s, cj->send_grav, ci->nodeID);
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_gpart(cj, s);
+
+	    scheduler_activate_send(s, cj->send_multipole, ci->nodeID);
+	  }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
+
+	} else if (cj->nodeID != engine_rank) {
+
+          /* If the local cell is active, receive data from the foreign cell. */
+          if (ci_active) {
+            scheduler_activate(s, cj->recv_grav);
+            scheduler_activate(s, cj->recv_multipole);
+	  }
+
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (cj_active) scheduler_activate(s, cj->recv_ti);
+
+          /* Is the foreign cell active and will need stuff from us? */
+          if (cj_active) {
+	    
+	    scheduler_activate_send(s, ci->send_grav, cj->nodeID);
+
+
+            /* Drift the cell which will be sent at the level at which it is
+               sent, i.e. drift the cell specified in the send task (l->t)
+               itself. */
+            cell_activate_drift_gpart(ci, s);
+
+	    scheduler_activate_send(s, ci->send_multipole, cj->nodeID);
+	  }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
+	}
+#endif
       }
     }
   }
diff --git a/src/const.h b/src/const.h
index 1670f2ea6f18d6f35712b3d7d358fa50e8c543c8..9dab663dd1675e00b6aeb1d482e063603ae968dd 100644
--- a/src/const.h
+++ b/src/const.h
@@ -114,6 +114,6 @@
 //#define SOURCETERMS_SN_FEEDBACK
 
 //#define ICHECK 5726454604296ll
-#define ICHECK 5268994168350ll
+//#define ICHECK 6745760614196ll /* NOTE(review): every ICHECK define is now commented out; runner.c still uses the bare token "== ICHECK" (not only inside "#if (ICHECK != 0)"), which will fail to compile unless ICHECK is defined elsewhere -- confirm */
 
 #endif /* SWIFT_CONST_H */
diff --git a/src/engine.c b/src/engine.c
index 6ba3d58194a83a7cd4256f2e21d251e6e5a4fb4d..9334129ad134684797531c9b1e62db34a2f750c6 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -1168,7 +1168,7 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci, struct cell *
  * @param t_multi The send_multi #task, if it has already been created.
  */
 void engine_addtasks_send_gravity(struct engine *e, struct cell *ci, struct cell *cj,
-				  struct task *t_grav, struct task *t_multi ) {
+				  struct task *t_grav, struct task *t_multi, struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct link *l = NULL;
@@ -1193,24 +1193,32 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *ci, struct cell
       t_multi = scheduler_addtask(s, task_type_send, task_subtype_multipole, 6 * ci->tag + 5,
 				  0, ci, cj);
 
+      t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
+                               6 * ci->tag + 2, 0, ci, cj);
+
       /* The sends should unlock the down pass. */
       scheduler_addunlock(s, t_multi, ci->super->grav_down);
       scheduler_addunlock(s, t_grav, ci->super->grav_down);
 
       /* Drift before you send */
       scheduler_addunlock(s, ci->super->drift_gpart, t_grav);
+      scheduler_addunlock(s, ci->super->init_grav, t_multi);
+
+      /* The super-cell's timestep task should unlock the send_ti task. */
+      scheduler_addunlock(s, ci->super->timestep, t_ti);
     }
 
     /* Add them to the local cell. */
     engine_addlink(e, &ci->send_grav, t_grav);
     engine_addlink(e, &ci->send_multipole, t_multi);
+    engine_addlink(e, &ci->send_ti, t_ti);
   }
 
   /* Recurse? */
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi);
+        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi, t_ti);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -1301,7 +1309,7 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c, struct task *t
  * @param t_grav The recv_gpart #task, if it has already been created.
  * @param t_multi The recv_multipole #task, if it has already been created.
  */
-void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task *t_grav, struct task *t_multi) {
+void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task *t_grav, struct task *t_multi, struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
@@ -1314,21 +1322,26 @@ void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task
                              c, NULL);
     t_multi = scheduler_addtask(s, task_type_recv, task_subtype_multipole, 6 * c->tag + 5, 0,
 				c, NULL);
+
+    t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend,
+                             6 * c->tag + 2, 0, c, NULL);
   }
 
   c->recv_grav = t_grav;
   c->recv_multipole = t_multi;
+  c->recv_ti = t_ti;
   
   for (struct link *l = c->grav; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_grav, l->t);
     scheduler_addunlock(s, t_multi, l->t);
+    scheduler_addunlock(s, l->t, t_ti);
   }
 
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
       if (c->progeny[k] != NULL)
-        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_multi);
+        engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_multi, t_ti);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -2788,7 +2801,7 @@ void engine_maketasks(struct engine *e) {
 
       if(e->policy & engine_policy_self_gravity)
 	for (int k = 0; k < p->nr_cells_in; k++)
-	  engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL);
+	  engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL, NULL);
 
       /* Loop through the proxy's outgoing cells and add the
          send tasks. */
@@ -2799,7 +2812,7 @@ void engine_maketasks(struct engine *e) {
 
       if(e->policy & engine_policy_self_gravity)
 	for (int k = 0; k < p->nr_cells_out; k++)
-	  engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL, NULL);
+	  engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL, NULL, NULL);
     }
   }
 #endif
@@ -3024,6 +3037,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
             scheduler_activate(s, ci->recv_multipole);
 	  }
 
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (ci_active) scheduler_activate(s, ci->recv_ti);
+
           /* Is the foreign cell active and will need stuff from us? */
           if (ci_active) {
 
@@ -3037,6 +3053,10 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
 
 	    scheduler_activate_send(s, cj->send_multipole, ci->nodeID);
 	  }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
+
 	} else if (cj->nodeID != engine_rank) {
 
           /* If the local cell is active, receive data from the foreign cell. */
@@ -3045,6 +3065,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
             scheduler_activate(s, cj->recv_multipole);
 	  }
 
+          /* If the foreign cell is active, we want its ti_end values. */
+          if (cj_active) scheduler_activate(s, cj->recv_ti);
+
           /* Is the foreign cell active and will need stuff from us? */
           if (cj_active) {
 
@@ -3059,6 +3082,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
 
 	    scheduler_activate_send(s, ci->send_multipole, cj->nodeID);
 	  }
+
+          /* If the local cell is active, send its ti_end values. */
+          if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
 	}
 #endif
       }
diff --git a/src/multipole.h b/src/multipole.h
index 9e157370fcf5c6cf751264c2145ad69e803544dc..f7a3a2e7f00e2bed67ff02096f317f7d682e987d 100644
--- a/src/multipole.h
+++ b/src/multipole.h
@@ -232,7 +232,7 @@ INLINE static void gravity_drift(struct gravity_tensors *m, double dt,
   m->CoM[2] += dz;
 
   /* Conservative change in maximal radius containing all gpart */
-  m->r_max = m->r_max_rebuild + x_diff;
+  m->r_max = m->r_max_rebuild + 0.*x_diff; /* FIXME(review): the 0.* factor disables the conservative growth of r_max with particle drift -- debugging hack, restore "+ x_diff" before merging */
 }
 
 /**
diff --git a/src/runner.c b/src/runner.c
index 87094710f558d20a915182d3b1ca15db0815e5c1..14814473bf65c1f32042185bddd78c78caf0f80b 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -1422,8 +1422,9 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
 
 #if (ICHECK != 0)
   for(int i=0; i < c->gcount; ++i)
-    if(c->gparts[i].id_or_neg_offset == ICHECK)
-      message("Found gpart");
+    if(c->gparts[i].id_or_neg_offset == ICHECK) {
+      message("Found gpart"); fflush(stdout);
+    }
 #endif
 
 
@@ -1484,7 +1485,7 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
 
           /* Check that this gpart has interacted with all the other
            * particles (via direct or multipoles) in the box */
-          if (gp->num_interacted != e->total_nr_gparts /*&& gp->id_or_neg_offset == ICHECK*/)
+          if (gp->num_interacted != e->total_nr_gparts && gp->id_or_neg_offset == ICHECK) /* NOTE(review): debug check narrowed from all gparts to the single tracked particle; also depends on ICHECK being defined, which this same patch comments out in const.h */
             error(
                 "g-particle (id=%lld, type=%s) did not interact "
                 "gravitationally "
diff --git a/src/runner_doiact_grav.h b/src/runner_doiact_grav.h
index 844c8caca2d3c85f5ced6ea593a391a0b5b2438b..fc6289c6cb3558ed8f9632aa5a8718c3f4123f24 100644
--- a/src/runner_doiact_grav.h
+++ b/src/runner_doiact_grav.h
@@ -1,3 +1,4 @@
+
 /*******************************************************************************
  * This file is part of SWIFT.
  * Copyright (c) 2013 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
@@ -135,6 +136,7 @@ void runner_do_grav_down(struct runner *r, struct cell *c, int timer) {
 void runner_dopair_grav_mm(const struct runner *r, struct cell *restrict ci,
                            struct cell *restrict cj) {
 
+
   /* Some constants */
   const struct engine *e = r->e;
   const struct space *s = e->s;
@@ -1237,14 +1239,15 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
        (abs(j-jj) <= 1 || abs(j-jj - cdim[1]) <= 1 || abs(j-jj + cdim[1]) <= 1) && 
        (abs(k-kk) <= 1 || abs(k-kk - cdim[2]) <= 1 || abs(k-kk + cdim[2]) <= 1)) {
 
-#if (ICHECK != 0)
-      if(check) {
-	++direct_ngbs;
-	direct_ngbs_gpart += cj->multipole->m_pole.num_gpart;
-	/* message("Found direct neighbour %d: (i,j,k)=(%d,%d,%d) (ii,jj,kk)=(%d,%d,%d) nodeID=%d", */
-	/* 	direct_ngbs, i,j,k, ii,jj,kk, cj->nodeID); */
-      }
-#endif
+
+/* #if (ICHECK != 0) */
+/*       if(check) { */
+/* 	++direct_ngbs; */
+/* 	direct_ngbs_gpart += cj->multipole->m_pole.num_gpart; */
+/* 	message("Found direct neighbour %d: (i,j,k)=(%d,%d,%d) (ii,jj,kk)=(%d,%d,%d) nodeID=%d", */
+/* 		direct_ngbs, i,j,k, ii,jj,kk, cj->nodeID); */
+/*       } */
+/* #endif */
       
 
     }else{
@@ -1287,11 +1290,6 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
       }
     } /* We are in charge of this pair */
   }   /* Loop over top-level cells */
-
-
-  if(check)
-    message("Interacted with %d indirectly and ignored %d direct interactions (counter=%lld) nr_cells=%d",
-	    other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells);
 	    
 
 #ifdef SWIFT_DEBUG_CHECKS
@@ -1300,6 +1298,10 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
     error("Not found the right number of particles in top-level interactions");
 #endif
 
+  if(check)
+    message("Interacted with %d indirectly and ignored %d direct interactions (counter=%lld) nr_cells=%d total=%lld",
+	    other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells, e->total_nr_gparts);
+
   if (timer) TIMER_TOC(timer_dograv_long_range);
 }