diff --git a/swiftmpirdmastepsim.c b/swiftmpirdmastepsim.c
index 13eabdae4ff986d63f3ad020eb7000a6f8efb02a..49c39fe97f0bab5177f45b63a0e66f9f4555dee3 100644
--- a/swiftmpirdmastepsim.c
+++ b/swiftmpirdmastepsim.c
@@ -75,7 +75,7 @@ static const int task_type_send = 22;
 static const int task_type_recv = 23;
 
 /* Global communicators for each of the subtypes. */
-#define task_subtype_count 30  // Just some upper limit on subtype.
+#define task_subtype_count 22  // Just some upper limit on subtype.
 static MPI_Comm subtypeMPI_comms[task_subtype_count];
 
 /* And the windows for one-sided communications. */
@@ -184,31 +184,29 @@ static void *send_thread(void *arg) {
                          log->otherrank, MESSAGE_SIZE * myrank,
                          mpi_window[log->subtype]);
 
-    // MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
-    MPI_Win_flush_all(mpi_window[log->subtype]);
-
-    if (oldval[0] == dataptr[0]) {
-      message("sent a message to %d/%d (%zd:%zd:%zd @ %zd)", log->otherrank,
-              log->subtype, dataptr[0], oldval[0], newval[0],
-              MESSAGE_SIZE * myrank);
-    } else {
-      message("failed to send a message to %d/%d (%zd:%zd:%zd) @ %zd",
-              log->otherrank, log->subtype, dataptr[0], oldval[0], newval[0],
-              MESSAGE_SIZE * myrank);
-    }
+    MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
+
+    //if (oldval[0] == dataptr[0]) {
+    //  message("sent a message to %d/%d (%zd:%zd:%zd @ %zd)", log->otherrank,
+    //          log->subtype, dataptr[0], oldval[0], newval[0],
+    //          MESSAGE_SIZE * myrank);
+    //} else {
+    //  message("failed to send a message to %d/%d (%zd:%zd:%zd) @ %zd",
+    //          log->otherrank, log->subtype, dataptr[0], oldval[0], newval[0],
+    //          MESSAGE_SIZE * myrank);
+    //}
 
     /* Wait for completion; this is when the remote flips back to LOCKED. We
      * poll on a get, as the local window is only used for receiving. Use an
      * Rget so we can use MPI_Test to get some local progression. */
     newval[0] = UNLOCKED;
     while (newval[0] != LOCKED) {
-      MPI_Win_flush_all(mpi_window[log->subtype]);
+      //MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
 
       MPI_Request request;
       MPI_Rget(&newval[0], 1, MPI_BLOCKTYPE, log->otherrank,
                MESSAGE_SIZE * myrank, 1, MPI_BLOCKTYPE,
                mpi_window[log->subtype], &request);
-
       int flag = 0;
       while (flag == 0) {
         MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
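For reference, the completion wait in this hunk reduces to the self-contained pattern below. wait_for_remote_unlock is a hypothetical name; BLOCKTYPE, MPI_BLOCKTYPE, LOCKED and UNLOCKED are the simulator's own definitions. A minimal sketch, assuming the remote rank rewrites the first element of our slot to LOCKED once it has drained the message:

#include <mpi.h>

/* Poll a remote lock word until it flips back to LOCKED. An Rget is used
 * instead of a plain Get so MPI_Test can drive local progression while we
 * wait. */
static void wait_for_remote_unlock(MPI_Win win, int otherrank,
                                   MPI_Aint offset) {
  BLOCKTYPE val = UNLOCKED;
  while (val != LOCKED) {
    MPI_Request request;
    MPI_Rget(&val, 1, MPI_BLOCKTYPE, otherrank, offset, 1, MPI_BLOCKTYPE,
             win, &request);
    int flag = 0;
    while (flag == 0) MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
  }
}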
@@ -252,7 +250,7 @@ static void *recv_thread(void *arg) {
       for (int j = 0; j < task_subtype_count; j++) {
         if (todo_recv <= 0) break;
 
-        MPI_Win_flush_all(mpi_window[j]);  // XXX emergency measure
+        //MPI_Win_flush(n, mpi_window[j]); // XXX emergency measure
         BLOCKTYPE lockval = mpi_ptr[j][n * MESSAGE_SIZE];
 
         if (lockval == UNLOCKED) {
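The flush changes in these hunks lean on MPI-3 flush semantics: MPI_Win_flush(rank, win) completes the RMA operations the calling process has issued to that one target, while MPI_Win_flush_all completes them for every target. The receive side issues no RMA operations, so it has nothing to flush and can read the lock word straight out of its own window memory, provided the window uses the unified memory model. A sketch of how that precondition could be checked (not part of the patch):

/* Query the window's memory model; direct loads from locally exposed
 * window memory are only well defined under MPI_WIN_UNIFIED. */
int *model, flag;
MPI_Win_get_attr(mpi_window[j], MPI_WIN_MODEL, &model, &flag);
if (!flag || *model != MPI_WIN_UNIFIED)
  message("window is not MPI_WIN_UNIFIED, direct reads are unsafe");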
@@ -265,9 +263,9 @@ static void *recv_thread(void *arg) {
                 log->subtype == j) {
               found = 1;
 
-              message("We have a ready message %d/%d at %zd: lockval %zd",
-                      log->rank, log->subtype, log->otherrank * MESSAGE_SIZE,
-                      lockval);
+              //message("We have a ready message %d/%d at %zd: lockval %zd",
+              //        log->rank, log->subtype, log->otherrank * MESSAGE_SIZE,
+              //        lockval);
 
              /* Check the sent data is unchanged and the received data is
               * as expected. */
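The check referred to in that final comment can be pictured as the sketch below. payload_matches is a hypothetical helper, not taken from the patch; the layout it assumes (lock word in element 0 of the slot, payload after it) follows the slot addressing visible above, where mpi_ptr[j][n * MESSAGE_SIZE] holds the lock value.

/* Compare a received slot against the expected payload, skipping the lock
 * word in element 0. */
static int payload_matches(const BLOCKTYPE *slot, const BLOCKTYPE *expected,
                           size_t count) {
  for (size_t i = 1; i < count; i++)
    if (slot[i] != expected[i]) return 0;
  return 1;
}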