Commit df8dc442 authored by Peter W. Draper

Reduce the amount of synchronization

Hopefully not needed; the code still runs.
parent 9713ee72
Part of 2 merge requests: !11 (Draft: Fast one-sided MPI version) and !8 (Draft: RDMA version with wrapped infinity calls).
@@ -75,7 +75,7 @@ static const int task_type_send = 22;
 static const int task_type_recv = 23;
 
 /* Global communicators for each of the subtypes. */
-#define task_subtype_count 30 // Just some upper limit on subtype.
+#define task_subtype_count 22 // Just some upper limit on subtype.
 static MPI_Comm subtypeMPI_comms[task_subtype_count];
 
 /* And the windows for one-sided communications. */
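For orientation, here is a minimal sketch (not taken from this repository) of how a per-subtype communicator and one-sided window behind subtypeMPI_comms and mpi_window might be set up; MESSAGE_SIZE, BLOCKTYPE, nranks and the use of MPI_Win_allocate/MPI_Win_lock_all are assumptions for illustration only.

/* Sketch: one duplicated communicator and one RMA window per subtype.
 * MESSAGE_SIZE, BLOCKTYPE and the allocation scheme are assumptions,
 * not necessarily what this code does elsewhere. */
#include <mpi.h>

#define task_subtype_count 22  /* just some upper limit on subtype */
#define MESSAGE_SIZE 1024      /* assumed slot size, in BLOCKTYPE units */
typedef long long BLOCKTYPE;   /* assumed to match MPI_BLOCKTYPE */

static MPI_Comm subtypeMPI_comms[task_subtype_count];
static MPI_Win mpi_window[task_subtype_count];
static BLOCKTYPE *mpi_ptr[task_subtype_count];

static void setup_windows(int nranks) {
  for (int j = 0; j < task_subtype_count; j++) {
    MPI_Comm_dup(MPI_COMM_WORLD, &subtypeMPI_comms[j]);

    /* One message slot per peer rank in each window. */
    MPI_Win_allocate((MPI_Aint)nranks * MESSAGE_SIZE * sizeof(BLOCKTYPE),
                     sizeof(BLOCKTYPE), MPI_INFO_NULL, subtypeMPI_comms[j],
                     &mpi_ptr[j], &mpi_window[j]);

    /* Open a passive-target epoch covering all ranks, so puts/gets can
     * be issued at any time and completed with MPI_Win_flush*. */
    MPI_Win_lock_all(0, mpi_window[j]);
  }
}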
@@ -184,31 +184,29 @@ static void *send_thread(void *arg) {
                    log->otherrank, MESSAGE_SIZE * myrank,
                    mpi_window[log->subtype]);
-    // MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
-    MPI_Win_flush_all(mpi_window[log->subtype]);
-    if (oldval[0] == dataptr[0]) {
-      message("sent a message to %d/%d (%zd:%zd:%zd @ %zd)", log->otherrank,
-              log->subtype, dataptr[0], oldval[0], newval[0],
-              MESSAGE_SIZE * myrank);
-    } else {
-      message("failed to send a message to %d/%d (%zd:%zd:%zd) @ %zd",
-              log->otherrank, log->subtype, dataptr[0], oldval[0], newval[0],
-              MESSAGE_SIZE * myrank);
-    }
+    MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
+    //if (oldval[0] == dataptr[0]) {
+    //  message("sent a message to %d/%d (%zd:%zd:%zd @ %zd)", log->otherrank,
+    //          log->subtype, dataptr[0], oldval[0], newval[0],
+    //          MESSAGE_SIZE * myrank);
+    //} else {
+    //  message("failed to send a message to %d/%d (%zd:%zd:%zd) @ %zd",
+    //          log->otherrank, log->subtype, dataptr[0], oldval[0], newval[0],
+    //          MESSAGE_SIZE * myrank);
+    //}
 
     /* Wait for completion, this is when remote flips back to LOCKED. We poll
      * on a get, as the local window is only used for receiving. Use an Rget
      * so we can use MPI_Test to get some local progression. */
     newval[0] = UNLOCKED;
     while (newval[0] != LOCKED) {
-      MPI_Win_flush_all(mpi_window[log->subtype]);
+      //MPI_Win_flush(log->otherrank, mpi_window[log->subtype]);
       MPI_Request request;
       MPI_Rget(&newval[0], 1, MPI_BLOCKTYPE, log->otherrank,
                MESSAGE_SIZE * myrank, 1, MPI_BLOCKTYPE,
                mpi_window[log->subtype], &request);
       int flag = 0;
       while (flag == 0) {
         MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
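The polling loop above is the request-based RMA idiom: MPI_Rget returns an MPI_Request, and spinning on MPI_Test both checks for completion and lets the MPI library make local progress. A condensed sketch of just that pattern, with the win, otherrank, myrank and LOCKED/UNLOCKED names assumed from the surrounding diff:

/* Sketch: re-read the remote lock word with a non-blocking get and spin
 * on MPI_Test so MPI can progress other RMA traffic while we wait. */
BLOCKTYPE lockword = UNLOCKED;
while (lockword != LOCKED) {
  MPI_Request request;
  MPI_Rget(&lockword, 1, MPI_BLOCKTYPE, otherrank,
           (MPI_Aint)MESSAGE_SIZE * myrank, 1, MPI_BLOCKTYPE, win, &request);
  int flag = 0;
  while (flag == 0) MPI_Test(&request, &flag, MPI_STATUS_IGNORE);
}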
@@ -252,7 +250,7 @@ static void *recv_thread(void *arg) {
       for (int j = 0; j < task_subtype_count; j++) {
         if (todo_recv <= 0) break;
-        MPI_Win_flush_all(mpi_window[j]); // XXX emergency measure
+        //MPI_Win_flush(n, mpi_window[j]); // XXX emergency measure
         BLOCKTYPE lockval = mpi_ptr[j][n * MESSAGE_SIZE];
         if (lockval == UNLOCKED) {
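For contrast, the receive side issues no gets at all: it polls its own window memory, which remote ranks update with one-sided writes, and the flush marked "XXX emergency measure" was a belt-and-braces attempt to force those pending writes to become visible locally. A hedged sketch of that scan, with nranks assumed:

/* Sketch: scan every subtype window for a slot whose lock word a remote
 * rank has flipped to UNLOCKED, signalling a ready message. */
for (int j = 0; j < task_subtype_count; j++) {
  for (int n = 0; n < nranks; n++) {
    BLOCKTYPE lockval = mpi_ptr[j][n * MESSAGE_SIZE];
    if (lockval == UNLOCKED) {
      /* ... handle the message from rank n with subtype j ... */
    }
  }
}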
@@ -265,9 +263,9 @@ static void *recv_thread(void *arg) {
               log->subtype == j) {
             found = 1;
-            message("We have a ready message %d/%d at %zd: lockval %zd",
-                    log->rank, log->subtype, log->otherrank * MESSAGE_SIZE,
-                    lockval);
+            //message("We have a ready message %d/%d at %zd: lockval %zd",
+            //        log->rank, log->subtype, log->otherrank * MESSAGE_SIZE,
+            //        lockval);
 
             /* Check data sent data is unchanged and received data is as
              * expected. */
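The common thread in these hunks is narrowing or dropping window-wide flushes. Within a passive-target epoch opened with MPI_Win_lock_all, MPI_Win_flush_all completes this process's outstanding RMA operations to every rank in the window, while MPI_Win_flush completes only those to a single target rank, which is all a point-to-point style message needs. A sketch of the distinction, with buf, count, offset, otherrank and win as placeholders:

/* Inside an epoch opened with MPI_Win_lock_all(0, win): */
MPI_Put(buf, count, MPI_BLOCKTYPE, otherrank, offset, count, MPI_BLOCKTYPE,
        win);

MPI_Win_flush_all(win);        /* before: completes RMA to ALL ranks */
MPI_Win_flush(otherrank, win); /* after: completes RMA to one target rank */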