diff --git a/src/cell.c b/src/cell.c
index 2eb091478f79586912da509e8a111c6e315a8c57..6e51c88f891e4f74f0414391f51c8b768d2f251b 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -3430,56 +3430,61 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
     const int cj_nodeID = nodeID;
 #endif
 
+    /* Activate the hydro and star drifts for self tasks. */
+    if (t->type == task_type_self && ci_active) {
+      cell_activate_drift_part(ci, s);
+      cell_activate_drift_spart(ci, s);
+    }
+
     /* Only activate tasks that involve a local active cell. */
-    if ((ci_active && ci_nodeID == nodeID) ||
-        (cj_active && cj_nodeID == nodeID)) {
+    if ((ci_active || cj_active) &&
+        (ci_nodeID == nodeID || cj_nodeID == nodeID)) {
+
       scheduler_activate(s, t);
 
-      /* Activate the drifts */
-      if (t->type == task_type_self) {
-        if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
-        if (ci_nodeID == nodeID) cell_activate_drift_spart(ci, s);
-      }
+      if (t->type == task_type_pair) {
 
-      /* Store current values of dx_max and h_max. */
-      else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
-        cell_activate_subcell_stars_tasks(t->ci, t->cj, s);
-      }
+        /* Do ci: stars in ci interact with gas in cj. */
+        if (ci_active) {
+          /* stars for ci */
+          atomic_or(&ci->stars.requires_sorts, 1 << t->flags);
+          ci->stars.dx_max_sort_old = ci->stars.dx_max_sort;
 
-      /* Set the correct sorting flags and activate hydro drifts */
-      else if (t->type == task_type_pair) {
+          /* hydro for cj */
+          atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
+          cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
+
+          /* Activate the drift tasks. */
+          if (ci_nodeID == nodeID) cell_activate_drift_spart(ci, s);
+          if (cj_nodeID == nodeID) cell_activate_drift_part(cj, s);
+
+          /* Check the sorts and activate them if needed. */
+          cell_activate_stars_sorts(ci, t->flags, s);
+          cell_activate_hydro_sorts(cj, t->flags, s);
+        }
+
+        /* Do cj: stars in cj interact with gas in ci. */
+        if (cj_active) {
+          /* hydro for ci */
+          atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
+          ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
+
+          /* stars for cj */
+          atomic_or(&cj->stars.requires_sorts, 1 << t->flags);
+          cj->stars.dx_max_sort_old = cj->stars.dx_max_sort;
+
+          /* Activate the drift tasks. */
+          if (cj_nodeID == nodeID) cell_activate_drift_spart(cj, s);
+          if (ci_nodeID == nodeID) cell_activate_drift_part(ci, s);
+
+          /* Check the sorts and activate them if needed. */
+          cell_activate_hydro_sorts(ci, t->flags, s);
+          cell_activate_stars_sorts(cj, t->flags, s);
+        }
+      }
 
-	/* Activate the drift tasks. */
-	if (ci->nodeID == nodeID) {
-	  cell_activate_drift_part(ci, s);
-	  cell_activate_drift_spart(ci, s);
-	}
-	if (cj->nodeID == nodeID) {
-	  cell_activate_drift_part(cj, s);
-	  cell_activate_drift_spart(cj, s);
-	}
-	
-	/* stars for ci */
-	atomic_or(&ci->stars.requires_sorts, 1 << t->flags);
-	ci->stars.dx_max_sort_old = ci->stars.dx_max_sort;
-	
-	/* hydro for cj */
-	atomic_or(&cj->hydro.requires_sorts, 1 << t->flags);
-	cj->hydro.dx_max_sort_old = cj->hydro.dx_max_sort;
-	
-	/* hydro for ci */
-	atomic_or(&ci->hydro.requires_sorts, 1 << t->flags);
-	ci->hydro.dx_max_sort_old = ci->hydro.dx_max_sort;
-	
-	/* stars for cj */
-	atomic_or(&cj->stars.requires_sorts, 1 << t->flags);
-	cj->stars.dx_max_sort_old = cj->stars.dx_max_sort;
-	
-	/* Check the sorts and activate them if needed. */
-	cell_activate_hydro_sorts(ci, t->flags, s);
-	cell_activate_hydro_sorts(cj, t->flags, s);
-	cell_activate_stars_sorts(ci, t->flags, s);
-	cell_activate_stars_sorts(cj, t->flags, s);
+      else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
+        cell_activate_subcell_stars_tasks(ci, cj, s);
       }
     }
 
@@ -3496,13 +3501,12 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
       if (ci_nodeID != nodeID) {
 
         if (cj_active) {
+          scheduler_activate(s, ci->mpi.hydro.recv_xv);
           scheduler_activate(s, ci->mpi.hydro.recv_rho);
-	  scheduler_activate(s, ci->mpi.stars.recv);
-	  
-          /* If the local cell is active, more stuff will be needed.
-           */
+
+          /* Send our star data over; make sure the sparts are drifted first. */
           scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
-          scheduler_activate_send(s, cj->mpi.hydro.send, ci_nodeID);
+          cell_activate_drift_spart(cj, s);
 
           /* If the local cell is active, send its ti_end values. */
           scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
@@ -3515,6 +3519,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
           scheduler_activate(s, ci->mpi.recv_ti);
 
           /* Is the foreign cell active and will need stuff from us? */
+          scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
           scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
 
           /* Drift the cell which will be sent; note that not all sent
@@ -3525,12 +3530,13 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
       } else if (cj_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
-        if (ci_active) {  
+        if (ci_active) {
           scheduler_activate(s, cj->mpi.hydro.recv_xv);
+          scheduler_activate(s, cj->mpi.hydro.recv_rho);
 
-          /* If the local cell is active, more stuff will be needed.
-           */
+          /* Send our star data over; make sure the sparts are drifted first. */
           scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+          cell_activate_drift_spart(ci, s);
 
           /* If the local cell is active, send its ti_end values. */
           scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
@@ -3544,6 +3550,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
 
           /* Is the foreign cell active and will need stuff from us? */
           scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
+          scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
 
           /* Drift the cell which will be sent; note that not all sent
              particles will be drifted, only those that are needed. */
@@ -3573,17 +3580,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
         (cj_active && ci_nodeID == nodeID)) {
       scheduler_activate(s, t);
 
-      if (t->type == task_type_self) {
-        /* Nothing to do here, all was drifted already */
-      }
-
-      else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
-        /* Nothing to do here, all was drifted already */
-      }
-
-      /* Set the correct sorting flags and activate hydro drifts */
-      else if (t->type == task_type_pair) {
-      }
+      /* Nothing more to do here; all drifts and sorts were activated above. */
     }
   }
 
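Aside: the requires_sorts updates in the pair branch above treat the field as a
per-direction bitmask; bit t->flags records which sort axis the pair needs
before it can run. Below is a minimal standalone sketch of that pattern. The
13-direction count and the mapping of atomic_or onto the GCC
__sync_fetch_and_or built-in are assumptions for illustration, not part of
this patch.

#include <stdio.h>

#define NUM_SORT_DIRECTIONS 13 /* assumed count, for illustration only */

/* Stand-in for SWIFT's atomic_or(): atomically OR `val` into `*addr`. */
#define atomic_or(addr, val) __sync_fetch_and_or(addr, val)

struct toy_cell {
  volatile unsigned int requires_sorts; /* one bit per sort direction */
};

/* Record that a pair task along direction `sid` needs this cell sorted. */
static void require_sort(struct toy_cell *c, int sid) {
  atomic_or(&c->requires_sorts, 1u << sid);
}

int main(void) {
  struct toy_cell c = {0};
  require_sort(&c, 4); /* e.g. two pair tasks along different axes */
  require_sort(&c, 11);
  for (int sid = 0; sid < NUM_SORT_DIRECTIONS; ++sid)
    if (c.requires_sorts & (1u << sid)) printf("sort %d needed\n", sid);
  return 0;
}
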
diff --git a/src/engine_maketasks.c b/src/engine_maketasks.c
index c4aa1d7181e3ea0479d5723412486509438ab2a6..83db2d91a0ad93daa69c5f778c66b0d512f53c4e 100644
--- a/src/engine_maketasks.c
+++ b/src/engine_maketasks.c
@@ -245,6 +245,9 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
 
       /* Ghost before you send */
       scheduler_addunlock(s, ci->hydro.super->stars.ghost, t_feedback);
+
+      /* Drift before you send */
+      scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);
     }
 
     engine_addlink(e, &ci->mpi.stars.send, t_feedback);
@@ -404,6 +407,7 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
 #endif
 
   for (struct link *l = c->stars.density; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_xv, l->t);
     scheduler_addunlock(s, t_rho, l->t);
   }
 
@@ -447,7 +451,9 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
 
   c->mpi.stars.recv = t_feedback;
 
+#ifdef SWIFT_DEBUG_CHECKS
   if (c->nodeID == e->nodeID) error("Local cell!");
+#endif
   if (c->stars.sorts != NULL)
     scheduler_addunlock(s, t_feedback, c->stars.sorts);
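
Note on the dependency hunks: scheduler_addunlock(s, a, b) encodes that task a
must complete before task b may run, so the receive side above forms the chain
recv_xv -> recv_rho -> stars density loop, while the send side gains drift and
ghost dependencies before the feedback send. A toy serial sketch of that
unlock idea follows; the task names and structures are illustrative
placeholders, not SWIFT's scheduler API.

#include <stdio.h>

#define MAX_UNLOCKS 4

struct toy_task {
  const char *name;
  struct toy_task *unlocks[MAX_UNLOCKS]; /* tasks this one releases */
  int nr_unlocks;
};

/* Mirror of the addunlock idea: `a` must finish before `b` starts. */
static void addunlock(struct toy_task *a, struct toy_task *b) {
  a->unlocks[a->nr_unlocks++] = b;
}

/* Serial stand-in for the scheduler: run a task, then its dependents. */
static void run(struct toy_task *t) {
  printf("running %s\n", t->name);
  for (int i = 0; i < t->nr_unlocks; ++i) run(t->unlocks[i]);
}

int main(void) {
  struct toy_task recv_xv = {"recv_xv"};
  struct toy_task recv_rho = {"recv_rho"};
  struct toy_task density = {"stars_density"};
  addunlock(&recv_xv, &recv_rho); /* positions arrive before densities */
  addunlock(&recv_rho, &density); /* densities arrive before the loop */
  run(&recv_xv);                  /* prints the three tasks in order */
  return 0;
}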