diff --git a/src/runner.c b/src/runner.c
index fd054184ac885f4db41d2a6deaf8b4033e962633..cfe3398684bef7b5a04cbfcefce1ebfe7729e24b 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -3321,8 +3321,6 @@ void *runner_main(void *data) {
             runner_do_recv_part(r, ci, 0, 1);
           } else if (t->subtype == task_subtype_gradient) {
             runner_do_recv_part(r, ci, 0, 1);
-          } else if (t->subtype == task_subtype_force) {
-            runner_do_recv_part(r, ci, 0, 1);
           } else if (t->subtype == task_subtype_limiter) {
             runner_do_recv_part(r, ci, 0, 1);
           } else if (t->subtype == task_subtype_gpart) {
diff --git a/src/scheduler.c b/src/scheduler.c
index 6a746cf8faca4600aa56bdd653d11958ad4df000..229c94b2088814352487032816f610a1bdf74724 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1849,8 +1849,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                          subtaskMPI_comms[t->subtype], &t->req);
       } else if (t->subtype == task_subtype_xv ||
                  t->subtype == task_subtype_rho ||
-                 t->subtype == task_subtype_gradient ||
-                 t->subtype == task_subtype_force) {
+                 t->subtype == task_subtype_gradient) {
        err = MPI_Irecv(t->ci->hydro.parts, t->ci->hydro.count, part_mpi_type,
                        t->ci->nodeID, t->flags,
                        subtaskMPI_comms[t->subtype], &t->req);
@@ -1898,8 +1897,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                          subtaskMPI_comms[t->subtype], &t->req);
       } else if (t->subtype == task_subtype_xv ||
                  t->subtype == task_subtype_rho ||
-                 t->subtype == task_subtype_gradient ||
-                 t->subtype == task_subtype_force) {
+                 t->subtype == task_subtype_gradient) {
         if ((t->ci->hydro.count * sizeof(struct part)) > s->mpi_message_limit)
           err = MPI_Isend(t->ci->hydro.parts, t->ci->hydro.count, part_mpi_type,
                           t->cj->nodeID, t->flags,