diff --git a/src/cell.c b/src/cell.c
index 28f14fe34d078dc1b1355169e7180d0b33c626c0..1531c72a31f7ad35adeaca84d43116edf8d56995 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -1236,7 +1236,7 @@ void cell_clean_links(struct cell *c, void *data) {
   c->hydro.force = NULL;
   c->grav.grav = NULL;
   c->grav.mm = NULL;
-  // TODO Alexei: set feedback to NULL
+  c->stars.feedback = NULL;
   c->stars.density = NULL;
 }
 
@@ -1753,11 +1753,16 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
 
   if (c == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->stars.sorts == NULL)
+    if (c->stars.sorts_local == NULL && c->stars.sorts_foreign == NULL)
       error("Trying to activate un-existing c->stars.sorts");
 #endif
-    scheduler_activate(s, c->stars.sorts);
-    if (c->nodeID == engine_rank) {
+    if (c->stars.sorts_local) {
+      scheduler_activate(s, c->stars.sorts_local);
+    }
+    if (c->stars.sorts_foreign) {
+      scheduler_activate(s, c->stars.sorts_foreign);
+    }
+    if (c->stars.sorts_local) {
       // MATTHIEU: to do: do we actually need both drifts here?
       cell_activate_drift_part(c, s);
       cell_activate_drift_spart(c, s);
@@ -1770,11 +1775,17 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
       parent->stars.do_sub_sort = 1;
       if (parent == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->stars.sorts == NULL)
+        if (parent->stars.sorts_local == NULL &&
+            parent->stars.sorts_foreign == NULL)
           error("Trying to activate un-existing parents->stars.sorts");
 #endif
-        scheduler_activate(s, parent->stars.sorts);
-        if (parent->nodeID == engine_rank) {
+        if (parent->stars.sorts_local) {
+          scheduler_activate(s, parent->stars.sorts_local);
+        }
+        if (parent->stars.sorts_foreign) {
+          scheduler_activate(s, parent->stars.sorts_foreign);
+        }
+        if (parent->stars.sorts_local) {
           cell_activate_drift_part(parent, s);
           cell_activate_drift_spart(parent, s);
         }
@@ -1789,9 +1800,6 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
  */
 void cell_activate_stars_sorts(struct cell *c, int sid, struct scheduler *s) {
 
-  // TODO Alexei, remove this
-  if (c->nodeID != engine_rank) return;
-
   /* Do we need to re-sort? */
   if (c->stars.dx_max_sort > space_maxreldx * c->dmin) {
 
@@ -3148,18 +3156,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
       /* Activate the send/recv tasks. */
       if (ci_nodeID != nodeID) {
 
-        // TODO Alexei: In this section, you will find some comments that
-        // are from the hydro code. It should look the same for the feedback.
-        /* If the local cell is active, receive data from the foreign cell. */
         if (cj_active) {
           scheduler_activate(s, ci->mpi.hydro.recv_xv);
-          /* if (ci_active) { */
-          /*   scheduler_activate(s, ci->mpi.hydro.recv_rho); */
-          /* } */
+          if (ci_active) {
+            scheduler_activate(s, ci->mpi.stars.recv);
+          }
         }
 
-        /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-        /* if (ci_active) scheduler_activate(s, ci->mpi.recv_ti); */
+        /* If the foreign cell is active, we want its ti_end values. */
+        if (ci_active) scheduler_activate(s, ci->mpi.recv_ti);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (ci_active) {
@@ -3170,30 +3175,28 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
              particles will be drifted, only those that are needed. */
           cell_activate_drift_part(cj, s);
 
-          /* /\* If the local cell is also active, more stuff will be needed.
-           * *\/ */
-          /* if (cj_active) { */
-          /*   scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID); */
-
-          /* } */
+          /* If the local cell is also active, more stuff will be needed.
+           */
+          if (cj_active) {
+            scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+          }
         }
 
-        /* /\* If the local cell is active, send its ti_end values. *\/ */
-        /* if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti,
-         * ci_nodeID); */
+        /* If the local cell is active, send its ti_end values. */
+        if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
 
       } else if (cj_nodeID != nodeID) {
 
         /* If the local cell is active, receive data from the foreign cell. */
         if (ci_active) {
           scheduler_activate(s, cj->mpi.hydro.recv_xv);
-          /* if (cj_active) { */
-          /*   scheduler_activate(s, cj->mpi.hydro.recv_rho); */
-          /* } */
+          if (cj_active) {
+            scheduler_activate(s, cj->mpi.stars.recv);
+          }
         }
 
-        /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-        /* if (cj_active) scheduler_activate(s, cj->mpi.recv_ti); */
+        /* If the foreign cell is active, we want its ti_end values. */
+        if (cj_active) scheduler_activate(s, cj->mpi.recv_ti);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (cj_active) {
@@ -3204,18 +3207,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
              particles will be drifted, only those that are needed. */
           cell_activate_drift_part(ci, s);
 
-          /* /\* If the local cell is also active, more stuff will be needed.
-           * *\/ */
-          /* if (ci_active) { */
-
-          /*   scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID); */
-
-          /* } */
+          /* If the local cell is also active, more stuff will be needed.
+           */
+          if (ci_active) {
+            scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+          }
         }
 
-        /* /\* If the local cell is active, send its ti_end values. *\/ */
-        /* if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti,
-         * cj_nodeID); */
+        /* If the local cell is active, send its ti_end values. */
+        if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
       }
 #endif
     }
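
Note: the send/recv activation logic above is symmetric in (ci, cj): whichever cell is foreign has its data received, gated on the activity of both sides. Standalone sketch of one direction of the pattern (mock types, not SWIFT's):

    struct task { int skip; };
    struct xcell {
      struct {
        struct task *recv_xv;
        struct { struct task *recv; } stars;
      } mpi;
    };

    static void activate(struct task *t) { if (t != NULL) t->skip = 0; }

    /* 'foreign' lives on another rank; local_active/foreign_active mirror
       cj_active/ci_active in the first branch above. */
    static void activate_stars_recvs(struct xcell *foreign, int local_active,
                                     int foreign_active) {
      if (local_active) {
        activate(foreign->mpi.recv_xv);      /* positions are always needed */
        if (foreign_active)
          activate(foreign->mpi.stars.recv); /* sparts only if both active */
      }
    }
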
diff --git a/src/cell.h b/src/cell.h
index 0fb05428a37cdc04e828ba3b024c29b5d509ceb0..85dcd2ac7b62bacf360a651bdefc566abb58913a 100644
--- a/src/cell.h
+++ b/src/cell.h
@@ -472,8 +472,11 @@ struct cell {
     /*! Linked list of the tasks computing this cell's star feedback. */
     struct link *feedback;
 
-    /*! The task computing this cell's sorts. */
-    struct task *sorts;
+    /*! The task computing this cell's sorts before the density. */
+    struct task *sorts_local;
+
+    /*! The task computing this cell's sorts before the feedback. */
+    struct task *sorts_foreign;
 
     /*! Max smoothing length in this cell. */
     double h_max;
@@ -570,6 +573,14 @@ struct cell {
       struct link *send;
     } grav;
 
+    struct {
+      /* Task receiving spart data. */
+      struct task *recv;
+
+      /* Linked list for sending spart data. */
+      struct link *send;
+    } stars;
+
     /* Task receiving data (time-step). */
     struct task *recv_ti;
 
@@ -990,25 +1001,6 @@ cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
           cj->dmin);
 }
 
-/**
- * @brief Have star particles in a pair of cells moved too much and require a
- * rebuild
- * ?
- *
- * @param ci The first #cell.
- * @param cj The second #cell.
- */
-__attribute__((always_inline)) INLINE static int
-cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
-
-  /* Is the cut-off radius plus the max distance the parts in both cells have */
-  /* moved larger than the cell size ? */
-  /* Note ci->dmin == cj->dmin */
-  return (kernel_gamma * max(ci->stars.h_max, cj->stars.h_max) +
-              ci->stars.dx_max_part + cj->stars.dx_max_part >
-          cj->dmin);
-}
-
 /**
  * @brief Add a unique tag to a cell, mostly for MPI communications.
  *
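
Note: splitting the single stars sort into sorts_local and sorts_foreign reflects their different dependencies: a local sort runs after the drifts, while a foreign sort can only run once the spart data has arrived over MPI (see the engine_maketasks.c hunks below). Sketch of the ownership-based dispatch (hypothetical helper, not in the patch; reuses the fields declared above):

    /* Pick the sort task that applies to a super-cell on this rank. */
    static struct task *cell_get_stars_sort(const struct cell *c,
                                            const int this_rank) {
      return (c->nodeID == this_rank) ? c->stars.sorts_local
                                      : c->stars.sorts_foreign;
    }
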
diff --git a/src/engine_maketasks.c b/src/engine_maketasks.c
index c5b7cb15ad3deab13702c3932acc5da00d2e5a4b..4d03d80b67b0732b7fea87b69d4110a839a01faf 100644
--- a/src/engine_maketasks.c
+++ b/src/engine_maketasks.c
@@ -210,11 +210,11 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
  * @param ci The sending #cell.
  * @param cj Dummy cell containing the nodeID of the receiving node.
  * @param t_xv The send_xv #task, if it has already been created.
- * @param t_rho The send_rho #task, if it has already been created.
+ * @param t_feed The send_feed #task, if it has already been created.
  */
 void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                 struct cell *cj, struct task *t_xv,
-                                struct task *t_rho) {
+                                struct task *t_feed) {
 
 #ifdef WITH_MPI
   struct link *l = NULL;
@@ -238,24 +238,17 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
       }
     }
 
-    // TODO Alexei: I guess that you can assume that if the send_xv exists,
-    // send_rho exists too
-
     if (t_xv == NULL) {
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
 
       /* Already exists, just need to get it */
       if (hydro != NULL) {
-        // TODO Alexei: set t_feedback
         t_xv = hydro->t;
 
         /* This task does not exist yet, we need to create it */
       } else {
 
-        // TODO Alexei: create task and do correct unlocks
-
-        /* Make sure this cell is tagged. */
-        cell_ensure_tagged(ci);
-
         /* Create the tasks and their dependencies? */
         t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv,
                                  ci->mpi.tag, 0, ci, cj);
@@ -263,20 +256,26 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
         /* Drift before you send */
         scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
       }
+
+      /* Create the feedback send task and its dependencies. */
+      t_feed = scheduler_addtask(s, task_type_send, task_subtype_spart,
+                                 ci->mpi.tag, 0, ci, cj);
+
+      /* Ghost before you send */
+      scheduler_addunlock(s, ci->super->stars.ghost_out, t_feed);
     }
 
     if (hydro == NULL) {
       engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
-      // TODO Alexei: addlink
-      /* engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho); */
     }
+    engine_addlink(e, &ci->mpi.stars.send, t_feed);
   }
 
   /* Recurse? */
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_rho);
+        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -312,6 +311,12 @@ void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
           (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
         break;
 
+  if (l == NULL)
+    for (l = ci->stars.density; l != NULL; l = l->next)
+      if (l->t->ci->nodeID == nodeID ||
+          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+        break;
+
   /* If found anything, attach send tasks. */
   if (l != NULL) {
 
@@ -418,10 +423,10 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
  * @param e The #engine.
  * @param c The foreign #cell.
  * @param t_xv The recv_xv #task, if it has already been created.
- * @param t_rho The recv_rho #task, if it has already been created.
+ * @param t_feed The recv_feed #task, if it has already been created.
  */
 void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
-                                struct task *t_xv, struct task *t_rho) {
+                                struct task *t_xv, struct task *t_feed) {
 
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
@@ -440,40 +445,38 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
       new_task = 1;
       t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag,
                                0, c, NULL);
-      // TODO Alexei: create t_feedback task
-      /* t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho,
-       * c->mpi.tag, */
-      /*                           0, c, NULL); */
     } else {
-      // TODO Alexei: set t_feedback
       t_xv = c->mpi.hydro.recv_xv;
     }
+    t_feed = scheduler_addtask(s, task_type_recv, task_subtype_spart,
+                               c->mpi.tag, 0, c, NULL);
+
+    /* The received sparts need to be sorted before the feedback loop */
+    scheduler_addunlock(s, t_feed, c->super->stars.sorts_foreign);
   }
 
-  // TODO Alexei: set pointer
   c->mpi.hydro.recv_xv = t_xv;
-  /* c->mpi.hydro.recv_rho = t_rho; */
+  c->mpi.stars.recv = t_feed;
 
   /* Add dependencies. */
   if (c->hydro.sorts != NULL && new_task) {
     scheduler_addunlock(s, t_xv, c->hydro.sorts);
   }
 
-  // TODO Alexei: You will need to sort the particles after receiving the spart
-
   for (struct link *l = c->stars.density; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_xv, l->t);
-    // TODO Alexei: I guess that you will need to unlock the recv here
-    /* scheduler_addunlock(s, l->t, t_rho); */
+    scheduler_addunlock(s, l->t, t_feed);
   }
-  // TODO Alexei: unlock feedback task
-  /* for (struct link *l = c->hydro.force; l != NULL; l = l->next) */
-  /*   scheduler_addunlock(s, t_rho, l->t); */
+
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_feed, l->t);
+  }
+
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
       if (c->progeny[k] != NULL)
-        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_rho);
+        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -555,6 +558,9 @@ void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
   for (struct link *l = c->hydro.force; l != NULL; l = l->next)
     scheduler_addunlock(s, l->t, t_ti);
 
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next)
+    scheduler_addunlock(s, l->t, t_ti);
+
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
@@ -873,13 +879,16 @@ void engine_make_hierarchical_tasks_stars(struct engine *e, struct cell *c) {
 
   /* Are we in a super-cell ? */
   if (c->super == c) {
+    /* Foreign tasks only */
+    if (c->nodeID != e->nodeID) {
+      c->stars.sorts_foreign = scheduler_addtask(
+          s, task_type_stars_sort_foreign, task_subtype_none, 0, 0, c, NULL);
+    }
 
     /* Local tasks only... */
     if (c->nodeID == e->nodeID) {
-      // TODO Alexei: do not need to be only on local node with feedback
-      /* Add the sort task. */
-      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
-                                         task_subtype_none, 0, 0, c, NULL);
+      c->stars.sorts_local = scheduler_addtask(
+          s, task_type_stars_sort_local, task_subtype_none, 0, 0, c, NULL);
 
       /* Generate the ghost tasks. */
       c->stars.ghost_in =
@@ -1144,11 +1153,19 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
     }
 
     /* Link stars sort tasks to all the higher sort tasks. */
-    if (t_type == task_type_stars_sort) {
+    if (t_type == task_type_stars_sort_local) {
       for (struct cell *finger = t->ci->parent; finger != NULL;
-           finger = finger->parent)
-        if (finger->stars.sorts != NULL)
-          scheduler_addunlock(sched, t, finger->stars.sorts);
+           finger = finger->parent) {
+        if (finger->stars.sorts_local != NULL)
+          scheduler_addunlock(sched, t, finger->stars.sorts_local);
+      }
+    }
+    if (t_type == task_type_stars_sort_foreign) {
+      for (struct cell *finger = t->ci->parent; finger != NULL;
+           finger = finger->parent) {
+        if (finger->stars.sorts_foreign != NULL)
+          scheduler_addunlock(sched, t, finger->stars.sorts_foreign);
+      }
     }
 
     /* Link self tasks to cells. */
@@ -1744,7 +1761,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
     struct task *t = &((struct task *)map_data)[ind];
 
     /* Sort tasks depend on the drift and gravity drift of the cell. */
-    if (t->type == task_type_stars_sort && t->ci->nodeID == engine_rank) {
+    if (t->type == task_type_stars_sort_local) {
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
     }
@@ -1753,9 +1770,6 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
     else if (t->type == task_type_self &&
              t->subtype == task_subtype_stars_density) {
 
-      /* Make the self-density tasks depend on the drifts. */
-      scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
-
       /* Make the self-density tasks depend on the drift and gravity drift. */
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
@@ -1787,9 +1801,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
 
       if (t->ci->nodeID == engine_rank) {
         scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-        // TODO Alexei: the stars in foreign cells need to be sorted before
-        // the feedback loop and after the ghosts
-        scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
       }
 
       if (t->ci->hydro.super != t->cj->hydro.super) {
@@ -1801,8 +1813,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->super != t->cj->super) {
         if (t->cj->nodeID == engine_rank) {
           scheduler_addunlock(sched, t->cj->super->grav.drift, t);
-          // TODO Alexei: same here, sort before feedback
-          scheduler_addunlock(sched, t->cj->super->stars.sorts, t);
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_local, t);
         }
       }
 
@@ -1811,6 +1822,16 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
           scheduler_addtask(sched, task_type_pair, task_subtype_stars_feedback,
                             0, 0, t->ci, t->cj);
 
+      /* The foreign sparts must be sorted before the feedback loop */
+      if (t->ci->nodeID != engine_rank) {
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_foreign, t2);
+      }
+      if (t->ci->super != t->cj->super) {
+        if (t->cj->nodeID != engine_rank) {
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_foreign, t2);
+        }
+      }
+
       /* Add the link between the new loop and both cells */
       engine_addlink(e, &t->ci->stars.feedback, t2);
       engine_addlink(e, &t->cj->stars.feedback, t2);
@@ -1821,10 +1842,10 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
         scheduler_addunlock(sched, t2, t->ci->super->end_force);
       }
       if (t->cj->nodeID == nodeID) {
-        if (t->ci->super != t->cj->super)
+        if (t->ci->super != t->cj->super) {
           engine_make_stars_loops_dependencies(sched, t, t2, t->cj);
-        if (t->ci->super != t->cj->super)
           scheduler_addunlock(sched, t2, t->cj->super->end_force);
+        }
       }
     }
 
@@ -1837,7 +1858,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.sorts, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-      scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+      scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
 
       /* Start by constructing the task for the second stars loop */
       struct task *t2 = scheduler_addtask(sched, task_type_sub_self,
@@ -1866,8 +1887,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
 
       if (t->cj->nodeID == engine_rank) {
         scheduler_addunlock(sched, t->cj->super->grav.drift, t);
-        // TODO Alexei: Still the same
-        scheduler_addunlock(sched, t->cj->super->stars.sorts, t);
+        scheduler_addunlock(sched, t->cj->super->stars.sorts_local, t);
       }
 
       if (t->ci->hydro.super != t->cj->hydro.super) {
@@ -1879,8 +1899,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->super != t->cj->super) {
         if (t->ci->nodeID == engine_rank) {
           scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-          // TODO Alexei: still the same
-          scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+          scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
         }
       }
 
@@ -1889,6 +1908,16 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
                                           task_subtype_stars_feedback, t->flags,
                                           0, t->ci, t->cj);
 
+      /* The foreign sparts must be sorted before the feedback loop */
+      if (t->cj->nodeID != engine_rank) {
+        scheduler_addunlock(sched, t->cj->super->stars.sorts_foreign, t2);
+      }
+      if (t->ci->super != t->cj->super) {
+        if (t->ci->nodeID != engine_rank) {
+          scheduler_addunlock(sched, t->ci->super->stars.sorts_foreign, t2);
+        }
+      }
+
       /* Add the link between the new loop and both cells */
       engine_addlink(e, &t->ci->stars.feedback, t2);
       engine_addlink(e, &t->cj->stars.feedback, t2);
diff --git a/src/engine_marktasks.c b/src/engine_marktasks.c
index d3737c24aa190d521c7badf8ba54f84a6689f35c..55dddbab9b6f040914d0a05ac9fad8b7a9ecf2ff 100644
--- a/src/engine_marktasks.c
+++ b/src/engine_marktasks.c
@@ -71,19 +71,15 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
   /* Activate the send/recv tasks. */
   if (ci_nodeID != nodeID) {
 
-    // TODO Alexei: here I think you will just need to uncomment the code
-    // and modify it from hydro to stars (this is almost just a copy from the
-    // hydro)
-    /* If the local cell is active, receive data from the foreign cell. */
     if (cj_active_stars) {
       scheduler_activate(s, ci->mpi.hydro.recv_xv);
-      /* if (ci_active_hydro) { */
-      /* 	scheduler_activate(s, ci->mpi.hydro.recv_rho); */
-      /* } */
+      if (ci_active_stars) {
+        scheduler_activate(s, ci->mpi.stars.recv);
+      }
     }
 
     /* If the foreign cell is active, we want its ti_end values. */
-    /* if (ci_active_stars) scheduler_activate(s, ci->mpi.recv_ti); */
+    if (ci_active_stars) scheduler_activate(s, ci->mpi.recv_ti);
 
     /* Is the foreign cell active and will need stuff from us? */
     if (ci_active_stars) {
@@ -97,29 +93,28 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
       cell_activate_drift_part(l->t->ci, s);
 
       /* If the local cell is also active, more stuff will be needed. */
-      /* if (cj_active_hydro) { */
-      /* 	scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
-       */
-
-      /* } */
+      if (cj_active_stars) {
+        scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+      }
     }
 
     /* If the local cell is active, send its ti_end values. */
-    /* if (cj_active_hydro) */
-    /*   scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID); */
+    if (cj_active_stars) {
+      scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+    }
 
   } else if (cj_nodeID != nodeID) {
     /* If the local cell is active, receive data from the foreign cell. */
     if (ci_active_stars) {
 
       scheduler_activate(s, cj->mpi.hydro.recv_xv);
-      /* if (cj_active_hydro) { */
-      /* 	scheduler_activate(s, cj->mpi.hydro.recv_rho); */
-      /* } */
+      if (cj_active_stars) {
+        scheduler_activate(s, cj->mpi.stars.recv);
+      }
     }
 
     /* If the foreign cell is active, we want its ti_end values. */
-    /* if (cj_active_hydro) scheduler_activate(s, cj->mpi.recv_ti); */
+    if (cj_active_stars) scheduler_activate(s, cj->mpi.recv_ti);
 
     /* Is the foreign cell active and will need stuff from us? */
     if (cj_active_stars) {
@@ -133,17 +128,15 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
       cell_activate_drift_part(l->t->ci, s);
 
       /* If the local cell is also active, more stuff will be needed. */
-      /* if (ci_active_hydro) { */
-
-      /* 	scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
-       */
-
-      /* } */
+      if (ci_active_stars) {
+        scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+      }
     }
 
     /* If the local cell is active, send its ti_end values. */
-    /* if (ci_active_hydro) */
-    /*   scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID); */
+    if (ci_active_stars) {
+      scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+    }
   }
 }
 #endif
diff --git a/src/error.h b/src/error.h
index d384ec56ba0dc3562160d94911e3e3d3bb786211..181a32c78b4ef09708eeade002157157895ddde5 100644
--- a/src/error.h
+++ b/src/error.h
@@ -54,7 +54,7 @@ extern int engine_rank;
     fprintf(stderr, "[%04i] %s %s:%s():%i: " s "\n", engine_rank,          \
             clocks_get_timesincestart(), __FILE__, __FUNCTION__, __LINE__, \
             ##__VA_ARGS__);                                                \
-    MPI_Abort(MPI_COMM_WORLD, -1);                                         \
+    swift_abort(1);                                                        \
   })
 #else
 #define error(s, ...)                                                      \
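
Note: swapping MPI_Abort() for swift_abort() lets the failing rank raise SIGABRT, so a debugger can trap the failure and a core dump records the call stack, which MPI_Abort() does not guarantee. A minimal sketch of such a wrapper (hypothetical; SWIFT's actual swift_abort() may do more):

    #include <stdio.h>
    #include <stdlib.h>

    static void example_abort(int errcode) {
      (void)errcode;  /* kept for parity with MPI_Abort's error-code argument */
      fflush(stdout); /* push buffered logs out before dying */
      fflush(stderr);
      abort();        /* raises SIGABRT: debugger trap / core dump */
    }
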
diff --git a/src/runner.c b/src/runner.c
index a41bcbe2359ba0336d112f6a0faa6144c4e47923..153b91d5f4f9d3e79a9a7a4c52564979a1d771dc 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -199,6 +199,7 @@ void runner_do_stars_ghost(struct runner *r, struct cell *c, int timer) {
           /* Skip if h is already h_max and we don't have enough neighbours */
           if ((sp->h >= stars_h_max) && (f < 0.f)) {
 
+            stars_reset_acceleration(sp);
             /* Ok, we are done with this particle */
             continue;
           }
@@ -249,6 +250,7 @@ void runner_do_stars_ghost(struct runner *r, struct cell *c, int timer) {
         }
 
         /* We now have a particle whose smoothing length has converged */
+        stars_reset_acceleration(sp);
 
         /* Compute the stellar evolution  */
         stars_evolve_spart(sp, stars_properties, cosmo);
@@ -2834,7 +2836,8 @@ void *runner_main(void *data) {
           /* Reset the sort flags as our work here is done. */
           t->flags = 0;
           break;
-        case task_type_stars_sort:
+        case task_type_stars_sort_local:
+        case task_type_stars_sort_foreign:
           /* Cleanup only if any of the indices went stale. */
           runner_do_stars_sort(
               r, ci, t->flags,
@@ -2896,7 +2899,7 @@ void *runner_main(void *data) {
           } else if (t->subtype == task_subtype_gpart) {
             runner_do_recv_gpart(r, ci, 1);
           } else if (t->subtype == task_subtype_spart) {
-            runner_do_recv_spart(r, ci, 0, 1);
+            runner_do_recv_spart(r, ci, 1, 1);
           } else if (t->subtype == task_subtype_multipole) {
             cell_unpack_multipoles(ci, (struct gravity_tensors *)t->buff);
             free(t->buff);
diff --git a/src/runner_doiact_stars.h b/src/runner_doiact_stars.h
index 40bea6cfc03871ddbeef0edbe5df38761ee115df..9cf9ea1b5e1020997b5f957442569866c0e03833 100644
--- a/src/runner_doiact_stars.h
+++ b/src/runner_doiact_stars.h
@@ -1020,6 +1020,8 @@ void DOSELF1_BRANCH_STARS(struct runner *r, struct cell *c) {
                                                                             \
     for (int pjd = 0; pjd < cj->TYPE.count; pjd++) {                        \
       const struct PART *p = &cj->TYPE.parts[sort_j[pjd].i];                \
+      if (PART##_is_inhibited(p, e)) continue;                              \
+                                                                            \
       const float d = p->x[0] * runner_shift[sid][0] +                      \
                       p->x[1] * runner_shift[sid][1] +                      \
                       p->x[2] * runner_shift[sid][2];                       \
@@ -1032,9 +1034,9 @@ void DOSELF1_BRANCH_STARS(struct runner *r, struct cell *c) {
             "cj->nodeID=%d "                                                \
             "ci->nodeID=%d d=%e sort_j[pjd].d=%e cj->" #TYPE                \
             ".dx_max_sort=%e "                                              \
-            "cj->" #TYPE ".dx_max_sort_old=%e",                             \
+            "cj->" #TYPE ".dx_max_sort_old=%e, %i",                         \
             cj->nodeID, ci->nodeID, d, sort_j[pjd].d, cj->TYPE.dx_max_sort, \
-            cj->TYPE.dx_max_sort_old);                                      \
+            cj->TYPE.dx_max_sort_old, cj->cellID);                          \
     }                                                                       \
   })
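
Note: the message above leans on preprocessor stringisation: #TYPE expands the macro argument to a string literal, so the error names the struct member actually being checked. Standalone illustration:

    #include <stdio.h>

    /* #FIELD turns the argument into the literal "dx_max_sort" etc. */
    #define REPORT_FIELD(obj, FIELD) \
      printf("cell->" #FIELD " = %e\n", (double)(obj).FIELD)

    struct demo { double dx_max_sort; };

    int main(void) {
      struct demo d = {0.25};
      REPORT_FIELD(d, dx_max_sort); /* prints: cell->dx_max_sort = 2.500000e-01 */
      return 0;
    }
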
 
diff --git a/src/scheduler.c b/src/scheduler.c
index be824432119ce6beb185dfc7d95ae2821146c2d8..d5fe7b61e09754e20773795302a2ebea65bba860 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1989,7 +1989,8 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
                (sizeof(int) * 8 - intrinsics_clz(t->ci->hydro.count));
         break;
 
-      case task_type_stars_sort:
+      case task_type_stars_sort_local:
+      case task_type_stars_sort_foreign:
         cost = wscale * intrinsics_popcount(t->flags) * scount_i *
                (sizeof(int) * 8 - intrinsics_clz(t->ci->stars.count));
         break;
@@ -2283,7 +2284,8 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
       case task_type_kick2:
       case task_type_stars_ghost:
       case task_type_logger:
-      case task_type_stars_sort:
+      case task_type_stars_sort_local:
+      case task_type_stars_sort_foreign:
       case task_type_timestep:
         qid = t->ci->super->owner;
         break;
diff --git a/src/scheduler.h b/src/scheduler.h
index f1e130c6ce2a8538b0126e86ee0cbd79cf5a0e0d..3ee92b2bdd114e23c8a5368506ef0ebba7d889f3 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -140,10 +140,13 @@ __attribute__((always_inline)) INLINE static void scheduler_activate(
 __attribute__((always_inline)) INLINE static struct link *
 scheduler_activate_send(struct scheduler *s, struct link *link, int nodeID) {
 
+  if (link == NULL) error("");
   struct link *l = NULL;
   for (l = link; l != NULL && l->t->cj->nodeID != nodeID; l = l->next)
     ;
-  if (l == NULL) error("Missing link to send task.");
+  if (l == NULL) {
+    error("Missing link to send task.");
+  }
   scheduler_activate(s, l->t);
   return l;
 }
diff --git a/src/space.c b/src/space.c
index 35aaffa66d5b921ec687350588ac91e3a52bb59f..a420eabdadd1c9c033e7d05fea22b18c0a6ebd7e 100644
--- a/src/space.c
+++ b/src/space.c
@@ -183,7 +183,8 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
       space_recycle_list(s, cell_rec_begin, cell_rec_end, multipole_rec_begin,
                          multipole_rec_end);
     c->hydro.sorts = NULL;
-    c->stars.sorts = NULL;
+    c->stars.sorts_local = NULL;
+    c->stars.sorts_foreign = NULL;
     c->nr_tasks = 0;
     c->grav.nr_mm_tasks = 0;
     c->hydro.density = NULL;
@@ -271,12 +272,14 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
     c->mpi.hydro.recv_rho = NULL;
     c->mpi.hydro.recv_gradient = NULL;
     c->mpi.grav.recv = NULL;
+    c->mpi.stars.recv = NULL;
     c->mpi.recv_ti = NULL;
 
     c->mpi.hydro.send_xv = NULL;
     c->mpi.hydro.send_rho = NULL;
     c->mpi.hydro.send_gradient = NULL;
     c->mpi.grav.send = NULL;
+    c->mpi.stars.send = NULL;
     c->mpi.send_ti = NULL;
 #endif
   }
@@ -543,6 +546,8 @@ void space_regrid(struct space *s, int verbose) {
           c->mpi.hydro.send_xv = NULL;
           c->mpi.hydro.send_rho = NULL;
           c->mpi.hydro.send_gradient = NULL;
+          c->mpi.stars.send = NULL;
+          c->mpi.stars.recv = NULL;
           c->mpi.grav.recv = NULL;
           c->mpi.grav.send = NULL;
 #endif  // WITH_MPI
diff --git a/src/stars/Default/stars.h b/src/stars/Default/stars.h
index 688a52b7a761eef241ccb429f930bc8bf75d7d39..a8faf43c1aad4e9c9a300c99be75dede02e85565 100644
--- a/src/stars/Default/stars.h
+++ b/src/stars/Default/stars.h
@@ -145,4 +145,18 @@ __attribute__((always_inline)) INLINE static void stars_evolve_spart(
     struct spart* restrict sp, const struct stars_props* stars_properties,
     const struct cosmology* cosmo) {}
 
+/**
+ * @brief Reset acceleration fields of a particle
+ *
+ * This is the equivalent of hydro_reset_acceleration.
+ * We do not compute the acceleration on star, therefore no need to use it.
+ *
+ * @param p The particle to act upon
+ */
+__attribute__((always_inline)) INLINE static void stars_reset_acceleration(
+    struct spart* restrict p) {
+#ifdef DEBUG_INTERACTIONS_STARS
+  p->num_ngb_force = 0;
+#endif
+}
 #endif /* SWIFT_DEFAULT_STARS_H */
diff --git a/src/stars/Default/stars_iact.h b/src/stars/Default/stars_iact.h
index c543054f5f68a6767fd320c49e485832e0cc5a20..7e36d4c4af6d121f804b9c43d942ed9c0abc2d0b 100644
--- a/src/stars/Default/stars_iact.h
+++ b/src/stars/Default/stars_iact.h
@@ -52,4 +52,10 @@ runner_iact_nonsym_stars_density(float r2, const float *dx, float hi, float hj,
 __attribute__((always_inline)) INLINE static void
 runner_iact_nonsym_stars_feedback(float r2, const float *dx, float hi, float hj,
                                   struct spart *restrict si,
-                                  struct part *restrict pj, float a, float H) {}
+                                  struct part *restrict pj, float a, float H) {
+
+#ifdef DEBUG_INTERACTIONS_STARS
+  /* Update ngb counters */
+  ++si->num_ngb_force;
+#endif
+}
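
Note: with the two hunks above (and the reset calls added in runner.c), the DEBUG_INTERACTIONS_STARS counter follows the usual lifecycle: the ghost zeroes num_ngb_force once the smoothing length has converged, then each feedback interaction increments it, and the final value is written to snapshots. Minimal standalone mock of that lifecycle:

    struct mock_spart { int num_ngb_force; };

    static void mock_reset(struct mock_spart *sp) { sp->num_ngb_force = 0; }

    static void mock_feedback_iact(struct mock_spart *si) {
      ++si->num_ngb_force; /* one increment per neighbour interaction */
    }
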
diff --git a/src/stars/Default/stars_io.h b/src/stars/Default/stars_io.h
index d5d692e403116669e7ed08dae19a0f57ff24696b..fbed04a00e2ab9b2aad0937b74d92612094a56c1 100644
--- a/src/stars/Default/stars_io.h
+++ b/src/stars/Default/stars_io.h
@@ -60,12 +60,8 @@ INLINE static void stars_write_particles(const struct spart *sparts,
                                          struct io_props *list,
                                          int *num_fields) {
 
-/* Say how much we want to write */
-#ifdef DEBUG_INTERACTIONS_STARS
-  *num_fields = 6;
-#else
+  /* Say how much we want to write */
   *num_fields = 5;
-#endif
 
   /* List what we want to write */
   list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
@@ -80,8 +76,14 @@ INLINE static void stars_write_particles(const struct spart *sparts,
                                  sparts, h);
 
 #ifdef DEBUG_INTERACTIONS_STARS
-  list[5] = io_make_output_field("NumberNeighbors", INT, 1, UNIT_CONV_NO_UNITS,
+
+  list += *num_fields;
+  *num_fields += 2;
+
+  list[0] = io_make_output_field("Num_ngb_density", INT, 1, UNIT_CONV_NO_UNITS,
                                  sparts, num_ngb_density);
+  list[1] = io_make_output_field("Num_ngb_force", INT, 1, UNIT_CONV_NO_UNITS,
+                                 sparts, num_ngb_force);
 #endif
 }
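
Note: the #ifdef block now appends its outputs instead of hard-coding a sixth slot: advance the list pointer past the mandatory fields, then grow the count, so new mandatory fields never collide with the debug ones. Standalone sketch of the pattern (mock field type, hypothetical EXTRA_DEBUG_FIELDS macro):

    struct field { const char *name; };

    static void write_fields(struct field *list, int *num_fields) {
      *num_fields = 2;
      list[0].name = "Coordinates";
      list[1].name = "Masses";

    #ifdef EXTRA_DEBUG_FIELDS
      list += *num_fields;  /* first free slot after the mandatory entries */
      *num_fields += 1;     /* account for the appended entry */
      list[0].name = "Num_ngb";
    #endif
    }
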
 
diff --git a/src/stars/Default/stars_part.h b/src/stars/Default/stars_part.h
index f014beeaa0be8ecd0e77db86103405f4d63935ae..c487528f0115b053351cef00baecfb3a657f0f00 100644
--- a/src/stars/Default/stars_part.h
+++ b/src/stars/Default/stars_part.h
@@ -78,6 +78,9 @@ struct spart {
 #ifdef DEBUG_INTERACTIONS_STARS
   /*! Number of interactions in the density SELF and PAIR */
   int num_ngb_density;
+
+  /*! Number of interactions in the force SELF and PAIR */
+  int num_ngb_force;
 #endif
 
 } SWIFT_STRUCT_ALIGN;
diff --git a/src/task.c b/src/task.c
index 0ac912b300a3a72e53ac0552ebdec378bb786f52..097a6f1cf8a7dc9d0dd0721396170bca6d04e452 100644
--- a/src/task.c
+++ b/src/task.c
@@ -79,7 +79,8 @@ const char *taskID_names[task_type_count] = {"none",
                                              "stars_ghost_in",
                                              "stars_ghost",
                                              "stars_ghost_out",
-                                             "stars_sort"};
+                                             "stars_sort_local",
+                                             "stars_sort_foreign"};
 
 /* Sub-task type names. */
 const char *subtaskID_names[task_subtype_count] = {
@@ -148,7 +149,8 @@ __attribute__((always_inline)) INLINE static enum task_actions task_acts_on(
       return task_action_all;
 
     case task_type_stars_ghost:
-    case task_type_stars_sort:
+    case task_type_stars_sort_local:
+    case task_type_stars_sort_foreign:
       return task_action_spart;
       break;
 
@@ -345,7 +347,8 @@ void task_unlock(struct task *t) {
       cell_gunlocktree(ci);
       break;
 
-    case task_type_stars_sort:
+    case task_type_stars_sort_local:
+    case task_type_stars_sort_foreign:
       cell_sunlocktree(ci);
       break;
 
@@ -466,7 +469,8 @@ int task_lock(struct task *t) {
       if (cell_locktree(ci) != 0) return 0;
       break;
 
-    case task_type_stars_sort:
+    case task_type_stars_sort_local:
+    case task_type_stars_sort_foreign:
       if (ci->stars.hold) return 0;
       if (cell_slocktree(ci) != 0) return 0;
       break;
@@ -656,7 +660,10 @@ void task_get_group_name(int type, int subtype, char *cluster) {
       strcpy(cluster, "Gravity");
       break;
     case task_subtype_stars_density:
-      strcpy(cluster, "Stars");
+      strcpy(cluster, "StarsDensity");
+      break;
+    case task_subtype_stars_feedback:
+      strcpy(cluster, "StarsFeedback");
       break;
     default:
       strcpy(cluster, "None");
diff --git a/src/task.h b/src/task.h
index a6782a6302e2f234f02d2b4e3052a11cb388dc31..89d7f5884e5a4f64bd06a0f761f7b3d79001fd7d 100644
--- a/src/task.h
+++ b/src/task.h
@@ -71,7 +71,8 @@ enum task_types {
   task_type_stars_ghost_in,
   task_type_stars_ghost,
   task_type_stars_ghost_out,
-  task_type_stars_sort,
+  task_type_stars_sort_local,
+  task_type_stars_sort_foreign,
   task_type_count
 } __attribute__((packed));
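
Note: every entry added to this enum must be mirrored by a string in taskID_names (see the src/task.c hunk above), and the two can silently drift apart. A compile-time guard, placed next to the array definition where its size is visible, could look like this (sketch, not part of the patch):

    _Static_assert(sizeof(taskID_names) / sizeof(taskID_names[0]) ==
                       task_type_count,
                   "taskID_names is out of sync with enum task_types");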