diff --git a/src/cell.c b/src/cell.c
index 657cd85cea63635c532bd05e4425600e2f7fefb5..dbccfd2f42cabf38417cd87de0450489240884be 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -1385,6 +1385,9 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         scheduler_activate(s, ci->recv_xv);
         if (cell_is_active(ci, e)) {
           scheduler_activate(s, ci->recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+          scheduler_activate(s, ci->recv_gradient);
+#endif
           scheduler_activate(s, ci->recv_ti);
         }
 
@@ -1404,12 +1407,21 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         if (t->type == task_type_pair) scheduler_activate(s, cj->drift_part);
 
         if (cell_is_active(cj, e)) {
+
           for (l = cj->send_rho; l != NULL && l->t->cj->nodeID != ci->nodeID;
                l = l->next)
             ;
           if (l == NULL) error("Missing link to send_rho task.");
           scheduler_activate(s, l->t);
 
+#ifdef EXTRA_HYDRO_LOOP
+          for (l = cj->send_gradient;
+               l != NULL && l->t->cj->nodeID != ci->nodeID; l = l->next)
+            ;
+          if (l == NULL) error("Missing link to send_gradient task.");
+          scheduler_activate(s, l->t);
+#endif
+
           for (l = cj->send_ti; l != NULL && l->t->cj->nodeID != ci->nodeID;
                l = l->next)
             ;
@@ -1423,6 +1435,9 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         scheduler_activate(s, cj->recv_xv);
         if (cell_is_active(cj, e)) {
           scheduler_activate(s, cj->recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+          scheduler_activate(s, cj->recv_gradient);
+#endif
           scheduler_activate(s, cj->recv_ti);
         }
 
@@ -1442,12 +1457,21 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         if (t->type == task_type_pair) scheduler_activate(s, ci->drift_part);
 
         if (cell_is_active(ci, e)) {
+
           for (l = ci->send_rho; l != NULL && l->t->cj->nodeID != cj->nodeID;
                l = l->next)
             ;
           if (l == NULL) error("Missing link to send_rho task.");
           scheduler_activate(s, l->t);
 
+#ifdef EXTRA_HYDRO_LOOP
+          for (l = ci->send_gradient;
+               l != NULL && l->t->cj->nodeID != cj->nodeID; l = l->next)
+            ;
+          if (l == NULL) error("Missing link to send_gradient task.");
+          scheduler_activate(s, l->t);
+#endif
+
           for (l = ci->send_ti; l != NULL && l->t->cj->nodeID != cj->nodeID;
                l = l->next)
             ;
diff --git a/src/engine.c b/src/engine.c
index 28b1d4b2e8b42cb865125f610f718b1d7130bd63..417c9f626d7e2f8d96d49d8d2bed942102b96e4f 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -2584,6 +2584,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
         scheduler_activate(s, ci->recv_xv);
         if (cell_is_active(ci, e)) {
           scheduler_activate(s, ci->recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+          scheduler_activate(s, ci->recv_gradient);
+#endif
           scheduler_activate(s, ci->recv_ti);
         }
 
@@ -2603,12 +2606,21 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
         if (t->type == task_type_pair) scheduler_activate(s, cj->drift_part);
 
         if (cell_is_active(cj, e)) {
+
           for (l = cj->send_rho; l != NULL && l->t->cj->nodeID != ci->nodeID;
                l = l->next)
             ;
           if (l == NULL) error("Missing link to send_rho task.");
           scheduler_activate(s, l->t);
 
+#ifdef EXTRA_HYDRO_LOOP
+          for (l = cj->send_gradient;
+               l != NULL && l->t->cj->nodeID != ci->nodeID; l = l->next)
+            ;
+          if (l == NULL) error("Missing link to send_gradient task.");
+          scheduler_activate(s, l->t);
+#endif
+
           for (l = cj->send_ti; l != NULL && l->t->cj->nodeID != ci->nodeID;
                l = l->next)
             ;
@@ -2622,6 +2634,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
         scheduler_activate(s, cj->recv_xv);
         if (cell_is_active(cj, e)) {
           scheduler_activate(s, cj->recv_rho);
+#ifdef EXTRA_HYDRO_LOOP
+          scheduler_activate(s, cj->recv_gradient);
+#endif
           scheduler_activate(s, cj->recv_ti);
         }
 
@@ -2647,6 +2662,14 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
           if (l == NULL) error("Missing link to send_rho task.");
           scheduler_activate(s, l->t);
 
+#ifdef EXTRA_HYDRO_LOOP
+          for (l = ci->send_gradient;
+               l != NULL && l->t->cj->nodeID != cj->nodeID; l = l->next)
+            ;
+          if (l == NULL) error("Missing link to send_gradient task.");
+          scheduler_activate(s, l->t);
+#endif
+
           for (l = ci->send_ti; l != NULL && l->t->cj->nodeID != cj->nodeID;
                l = l->next)
             ;
diff --git a/src/runner.c b/src/runner.c
index dd8cd7f426d80b81bbe4522f5098120731053aad..9be0a9ee2ce23888d04346679ccb36fdc6f13a02 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -1914,6 +1914,8 @@ void *runner_main(void *data) {
             runner_do_recv_part(r, ci, 1, 1);
           } else if (t->subtype == task_subtype_rho) {
             runner_do_recv_part(r, ci, 1, 1);
+          } else if (t->subtype == task_subtype_gradient) {
+            runner_do_recv_part(r, ci, 1, 1);
           } else if (t->subtype == task_subtype_gpart) {
             runner_do_recv_gpart(r, ci, 1);
           } else if (t->subtype == task_subtype_spart) {
diff --git a/src/scheduler.c b/src/scheduler.c
index 7e42c3a214cff3fe30e7c885b06c48d25eac0e8d..b07c403e4ecd960b22b51f24372ca0a3420a453f 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1379,7 +1379,8 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                           MPI_BYTE, t->ci->nodeID, t->flags, MPI_COMM_WORLD,
                           &t->req);
         } else if (t->subtype == task_subtype_xv ||
-                   t->subtype == task_subtype_rho) {
+                   t->subtype == task_subtype_rho ||
+                   t->subtype == task_subtype_gradient) {
           err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
                           t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
           // message( "receiving %i parts with tag=%i from %i to %i." ,
@@ -1414,7 +1415,8 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                           MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD,
                           &t->req);
         } else if (t->subtype == task_subtype_xv ||
-                   t->subtype == task_subtype_rho) {
+                   t->subtype == task_subtype_rho ||
+                   t->subtype == task_subtype_gradient) {
 #ifdef SWIFT_DEBUG_CHECKS
           for (int k = 0; k < t->ci->count; k++)
             if (t->ci->parts[k].ti_drift != s->space->e->ti_current)
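
Note on the repeated pattern above (an editorial sketch, not part of the patch): each hunk that activates a send task walks the cell's send link list until it reaches the link whose task targets the foreign node, then activates that task; the same walk now appears for send_rho, send_gradient and send_ti. Assuming only the structures already used in the hunks (a singly-linked struct link with fields t and next, task->cj->nodeID, and the scheduler_activate()/error() calls shown), the search could be read as the following hypothetical helper; the name activate_send_to_node and its signature are illustrative and do not exist in the patch.

    /* Illustrative sketch only, not introduced by this patch: activate the
     * send task in `sends` whose destination cell lives on rank `nodeID`.
     * Assumes the SWIFT declarations of struct scheduler, struct link,
     * scheduler_activate() and error() visible in the files above. */
    static void activate_send_to_node(struct scheduler *s, struct link *sends,
                                      int nodeID) {
      struct link *l = sends;
      /* Walk the singly-linked send list to the link targeting nodeID. */
      while (l != NULL && l->t->cj->nodeID != nodeID) l = l->next;
      if (l == NULL) error("Missing link to send task.");
      scheduler_activate(s, l->t);
    }

    /* For example, the send_gradient hunks would then read:
     *   activate_send_to_node(s, cj->send_gradient, ci->nodeID);  */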