diff --git a/src/scheduler.c b/src/scheduler.c
index 367fbe599a14e86fd592569c9e2b1de8a45154e0..34fe1d0ec3eb6340a1f1ee6dbed8757b2b8a389f 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1680,6 +1680,15 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
               t->ci->hydro.count * sizeof(struct black_holes_part_data),
               MPI_BYTE, t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
               &t->req);
+        } else if (t->subtype == task_subtype_bpart_merger) {
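+          /* Buffer to receive the incoming black hole merger data. */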
+          t->buff = (struct black_holes_bpart_data *)malloc(
+              sizeof(struct black_holes_bpart_data) * t->ci->black_holes.count);
+          err = MPI_Irecv(
+              t->buff,
+              t->ci->black_holes.count * sizeof(struct black_holes_bpart_data),
+              MPI_BYTE, t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
+              &t->req);
         } else if (t->subtype == task_subtype_xv ||
                    t->subtype == task_subtype_rho ||
                    t->subtype == task_subtype_gradient) {
@@ -1821,6 +1830,29 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                 MPI_BYTE, t->cj->nodeID, t->flags, subtaskMPI_comms[t->subtype],
                 &t->req);
           }
+        } else if (t->subtype == task_subtype_bpart_merger) {
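+          /* Allocate a buffer and pack the black hole merger data. */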
+          t->buff = (struct black_holes_bpart_data *)malloc(
+              sizeof(struct black_holes_bpart_data) * t->ci->black_holes.count);
+          cell_pack_bpart_swallow(t->ci,
+                                  (struct black_holes_bpart_data *)t->buff);
+
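+          /* Use a standard non-blocking send above the message-size limit
+           * and a synchronous send below it. */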
+          if (t->ci->black_holes.count * sizeof(struct black_holes_bpart_data) >
+              s->mpi_message_limit) {
+            err = MPI_Isend(t->buff,
+                            t->ci->black_holes.count *
+                                sizeof(struct black_holes_bpart_data),
+                            MPI_BYTE, t->cj->nodeID, t->flags,
+                            subtaskMPI_comms[t->subtype], &t->req);
+          } else {
+            err = MPI_Issend(t->buff,
+                             t->ci->black_holes.count *
+                                 sizeof(struct black_holes_bpart_data),
+                             MPI_BYTE, t->cj->nodeID, t->flags,
+                             subtaskMPI_comms[t->subtype], &t->req);
+          }
 
         } else if (t->subtype == task_subtype_xv ||
                    t->subtype == task_subtype_rho ||