diff --git a/src/runner.c b/src/runner.c
index 2f072ffd4b9b896067ef2ef9ce8a28e821978d20..71fcca63f499f608b103893444dc524b563b66fc 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -1537,6 +1537,9 @@ void *runner_main(void *data) {
             free(t->buff);
           } else if (t->subtype == task_subtype_xv) {
             runner_do_recv_part(r, ci, 1);
+            /* Only process the received density data (and refresh h_max) if the cell has active particles. */
+          } else if (t->subtype == task_subtype_rho && cell_is_active(ci, e)) {
+            runner_do_recv_part(r, ci, 1);
           } else if (t->subtype == task_subtype_gpart) {
             runner_do_recv_gpart(r, ci, 1);
           }
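
For context: runner_do_recv_part() appears to walk the received cell hierarchy and refresh its time-step and h_max bookkeeping, so the new branch skips that walk for density receives on inactive cells. Below is a minimal, self-contained sketch of the control flow; the simplified structs, the cell_is_active() test (comparing ti_end_min against the engine's ti_current), and the recv_* stubs are assumptions modelled loosely on SWIFT, not its real definitions.

/* Sketch of the activity-gated recv dispatch, with stand-in types.
 * Only the control flow mirrors the patch above. */
#include <stdio.h>

enum task_subtype { task_subtype_xv, task_subtype_rho, task_subtype_gpart };

struct engine { long long ti_current; };
struct cell   { long long ti_end_min; int count; };

/* Assumption: a cell is "active" when its earliest particle
 * end-of-step matches the engine's current integer time. */
static int cell_is_active(const struct cell *c, const struct engine *e) {
  return c->ti_end_min == e->ti_current;
}

static void recv_part(struct cell *c)  { printf("process %d parts\n", c->count); }
static void recv_gpart(struct cell *c) { printf("process %d gparts\n", c->count); }

/* Mirrors the runner_main() branch: the post-receive cell update
 * (which refreshes h_max) is skipped for rho data on inactive cells. */
static void handle_recv(enum task_subtype st, struct cell *c,
                        const struct engine *e) {
  if (st == task_subtype_xv) {
    recv_part(c);
  } else if (st == task_subtype_rho && cell_is_active(c, e)) {
    recv_part(c);
  } else if (st == task_subtype_gpart) {
    recv_gpart(c);
  }
}

int main(void) {
  struct engine e = {.ti_current = 8};
  struct cell active = {.ti_end_min = 8, .count = 64};
  struct cell idle = {.ti_end_min = 16, .count = 64};
  handle_recv(task_subtype_rho, &active, &e); /* processed */
  handle_recv(task_subtype_rho, &idle, &e);   /* skipped   */
  return 0;
}

Gating on activity avoids a per-step tree walk over cells whose particles are not being updated this step.
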
diff --git a/src/scheduler.c b/src/scheduler.c
index af2d1d0fb0e08d82dc91afa731e13885935d7d4d..8b111a353d91e8dbf1db776ff50e4937809701a2 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1202,9 +1202,9 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
                    t->subtype == task_subtype_rho) {
           err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
                           t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
-	  // message( "receiving %i parts with tag=%i from %i to %i." ,
-	  //     t->ci->count , t->flags , t->ci->nodeID , s->nodeID );
-	  // fflush(stdout);
+          // message( "receiving %i parts with tag=%i from %i to %i." ,
+          //     t->ci->count , t->flags , t->ci->nodeID , s->nodeID );
+          // fflush(stdout);
         } else if (t->subtype == task_subtype_gpart) {
           err = MPI_Irecv(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
                           t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
@@ -1237,10 +1237,10 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
           err = MPI_Isend(t->ci->parts, t->ci->count, part_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
 
-	  // message( "sending %i parts with tag=%i from %i to %i." ,
-	  //     t->ci->count , t->flags , s->nodeID , t->cj->nodeID );
-	  // fflush(stdout);
-	} else if (t->subtype == task_subtype_gpart) {
+          // message( "sending %i parts with tag=%i from %i to %i." ,
+          //     t->ci->count , t->flags , s->nodeID , t->cj->nodeID );
+          // fflush(stdout);
+        } else if (t->subtype == task_subtype_gpart) {
           err = MPI_Isend(t->ci->gparts, t->ci->gcount, gpart_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
         } else {
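
The scheduler hunks above are whitespace-only (the commented-out debug messages are reindented from tabs to spaces), but the surrounding pattern they sit in is the core of the exchange: post a non-blocking MPI_Isend/MPI_Irecv, stash the request in t->req, and complete it later. A self-contained sketch of that pattern follows; COUNT, TAG and the plain double payload are placeholders standing in for t->ci->count, t->flags and part_mpi_type, and MPI_Wait stands in for whatever completion test the scheduler actually uses.

/* Minimal non-blocking exchange between ranks 0 and 1, mirroring the
 * MPI_Isend/MPI_Irecv calls above. Run with: mpirun -np 2 ./a.out */
#include <mpi.h>
#include <stdio.h>

#define COUNT 4  /* stand-in for t->ci->count */
#define TAG   17 /* stand-in for t->flags     */

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  double buf[COUNT];
  MPI_Request req;

  if (rank == 0) {
    for (int i = 0; i < COUNT; i++) buf[i] = (double)i;
    /* Post the send and keep the request, as scheduler_enqueue() does. */
    MPI_Isend(buf, COUNT, MPI_DOUBLE, 1, TAG, MPI_COMM_WORLD, &req);
  } else if (rank == 1) {
    MPI_Irecv(buf, COUNT, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, &req);
  }

  /* Block until the posted operation completes; the real code keeps
   * the request alive and checks it from the task machinery instead. */
  if (rank <= 1) MPI_Wait(&req, MPI_STATUS_IGNORE);

  if (rank == 1)
    printf("received %d doubles, last=%g\n", COUNT, buf[COUNT - 1]);

  MPI_Finalize();
  return 0;
}

Keeping the request in the task lets the runner overlap communication with other work instead of stalling in a blocking send or receive.
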