Commit e217851a authored by Peter W. Draper

Proxy exchange simulator now working

parent 1f1929e5
@@ -61,11 +61,14 @@ int nr_recv_pcells = 0;
  */
 static void pick_logs(void) {
   size_t nlogs = mpiuse_nr_logs();
   if (verbose) message("Restored %zd logs", nlogs);
-  /* Duplicate of logs. */
-  send_pcells = (struct mpiuse_log_entry **)calloc(nlogs, sizeof(struct mpiuse_log_entry *));
+  /* Separated logs. */
+  send_pcells = (struct mpiuse_log_entry **)calloc(
+      nlogs, sizeof(struct mpiuse_log_entry *));
   nr_send_pcells = 0;
-  recv_pcells = (struct mpiuse_log_entry **)calloc(nlogs, sizeof(struct mpiuse_log_entry *));
+  recv_pcells = (struct mpiuse_log_entry **)calloc(
+      nlogs, sizeof(struct mpiuse_log_entry *));
   nr_recv_pcells = 0;
   for (int k = 0; k < nlogs; k++) {
@@ -73,10 +76,12 @@ static void pick_logs(void) {
     if (log->rank == myrank && log->activation) {
       log->data = NULL;
       if (log->type == task_type_send) {
         if (log->subtype == task_subtype_pcells) {
           send_pcells[nr_send_pcells] = log;
           nr_send_pcells++;
-        } else if (log->subtype != task_subtype_count) {
+        } else if (log->subtype != task_subtype_pcells &&
+                   log->subtype != task_subtype_count) {
           error("task subtype '%d' is not a known value", log->subtype);
         }
@@ -85,7 +90,8 @@ static void pick_logs(void) {
         if (log->subtype == task_subtype_pcells) {
           recv_pcells[nr_recv_pcells] = log;
           nr_recv_pcells++;
-        } else if (log->subtype != task_subtype_count) {
+        } else if (log->subtype != task_subtype_pcells &&
+                   log->subtype != task_subtype_count) {
           error("task subtype '%d' is not a known value", log->subtype);
         }
@@ -94,14 +100,16 @@ static void pick_logs(void) {
       }
     }
   }
+  if (verbose)
+    message("Read %d send and %d recv pcells logs", nr_send_pcells,
+            nr_recv_pcells);
 }
 /**
  * @brief usage help.
  */
 static void usage(char *argv[]) {
-  fprintf(stderr, "Usage: %s [-vf] SWIFT_mpiuse-log-file.dat\n",
-          argv[0]);
+  fprintf(stderr, "Usage: %s [-vf] SWIFT_mpiuse-log-file.dat\n", argv[0]);
   fprintf(stderr, " options: -v verbose\n");
   fflush(stderr);
 }
@@ -175,33 +183,31 @@ int main(int argc, char *argv[]) {
     struct mpiuse_log_entry *log = send_pcells[k];
     /* Need to regenerate the tags for each other communication type. */
-    int basetag = log->rank * proxy_tag_shift;
+    int basetag = myrank * proxy_tag_shift;
     /* Start Isend counts of pcells. Really just the size of the buffer we're
      * about to send, SWIFT sends the count. */
     int size = log->size;
-    res = MPI_Isend(&size, 1, MPI_INT, log->otherrank,
-                    basetag + proxy_tag_count,
-                    MPI_COMM_WORLD, &req_send_counts[k]);
-    if (res != MPI_SUCCESS)
-      error("Counts MPI_Isend failed.");
+    res =
+        MPI_Isend(&size, 1, MPI_INT, log->otherrank, basetag + proxy_tag_count,
+                  MPI_COMM_WORLD, &req_send_counts[k]);
+    if (res != MPI_SUCCESS) error("Counts MPI_Isend failed.");
     /* Start Isend of pcells. */
     log->data = calloc(log->size, 1);
     res = MPI_Isend(log->data, log->size, MPI_BYTE, log->otherrank,
                     basetag + proxy_tag_cells, MPI_COMM_WORLD,
                     &req_pcells_out[k]);
-    if (res != MPI_SUCCESS)
-      error("Pcell MPI_Isend failed.");
+    if (res != MPI_SUCCESS) error("Pcell MPI_Isend failed.");
     /* Start Irecv counts of pcells from other rank. */
-    basetag = log->rank * proxy_tag_shift;
+    basetag = log->otherrank * proxy_tag_shift;
     res = MPI_Irecv(&pcells_size[k], 1, MPI_INT, log->otherrank,
                     basetag + proxy_tag_count, MPI_COMM_WORLD,
                     &req_recv_counts[k]);
-    if (res != MPI_SUCCESS)
-      error("Counts MPI_Irecv failed.");
+    if (res != MPI_SUCCESS) error("Counts MPI_Irecv failed.");
   }
+  message("All counts requests and pcell sends are launched");
   /* Now wait for any of the counts irecvs to complete and then create the
    * irecv for the pcells. */
@@ -213,23 +219,23 @@ int main(int argc, char *argv[]) {
     res = MPI_Waitany(nr_send_pcells, req_recv_counts, &pid, &status);
     if (res != MPI_SUCCESS || pid == MPI_UNDEFINED)
       error("MPI_Waitany failed.");
+    if (verbose) message("Counts received for proxy %d", pid);
     struct mpiuse_log_entry *log = send_pcells[pid];
-    int basetag = log->rank * proxy_tag_shift;
+    int basetag = log->otherrank * proxy_tag_shift;
     pcells_in[pid] = calloc(pcells_size[pid], 1);
-    res = MPI_Irecv(pcells_in[pid], pcells_size[pid], MPI_BYTE,
-                    log->otherrank, basetag + proxy_tag_cells,
-                    MPI_COMM_WORLD, &req_pcells_in[pid]);
+    res = MPI_Irecv(pcells_in[pid], pcells_size[pid], MPI_BYTE, log->otherrank,
+                    basetag + proxy_tag_cells, MPI_COMM_WORLD,
+                    &req_pcells_in[pid]);
-    if (res != MPI_SUCCESS)
-      error("Pcell MPI_Irecv failed.");
+    if (res != MPI_SUCCESS) error("Pcell MPI_Irecv failed.");
   }
+  message("All proxy cell counts have arrived");
   /* Waitall for all Isend counts to complete. */
   res = MPI_Waitall(nr_send_pcells, req_send_counts, MPI_STATUSES_IGNORE);
-  if (res != MPI_SUCCESS)
-    error("Waitall for counts Isend failed.");
+  if (res != MPI_SUCCESS) error("Waitall for counts Isend failed.");
   /* Now wait for the pcell irecvs to complete, so we receive the pcells,
    * which would be unpacked in SWIFT. */
@@ -242,11 +248,11 @@ int main(int argc, char *argv[]) {
     /* XXX check the data received is correct? */
   }
+  message("All proxy cells have arrived");
   /* Waitall for Isend of pcells to complete. */
   res = MPI_Waitall(nr_send_pcells, req_pcells_out, MPI_STATUSES_IGNORE);
-  if (res != MPI_SUCCESS)
-    error("Waitall for pcells Isend failed.");
+  if (res != MPI_SUCCESS) error("Waitall for pcells Isend failed.");
   /* Shutdown MPI. */
   res = MPI_Finalize();
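
For reference, the handshake the simulator exercises is: post a non-blocking send of the pcell byte count, a non-blocking send of the packed pcells, and a non-blocking receive of the other rank's count, then only post the pcell receive once that count has arrived so the buffer can be sized; tags are always derived from the sending rank's base tag, which is what this commit fixes. Below is a minimal standalone sketch of that pattern for two ranks. It is not SWIFT code: the TAG_SHIFT/TAG_COUNT/TAG_CELLS values, buffer sizes and plain error handling are illustrative placeholders for SWIFT's proxy_tag_* constants and helpers.

/* Minimal sketch (not SWIFT code): pairwise count-then-payload exchange
 * with tags derived from the *sending* rank, as in the commit above.
 * Build and run with e.g. `mpicc sketch.c -o sketch && mpirun -np 2 ./sketch`. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for SWIFT's proxy tag constants. */
#define TAG_SHIFT 2
#define TAG_COUNT 0
#define TAG_CELLS 1

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);
  int myrank, nranks;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm_size(MPI_COMM_WORLD, &nranks);
  if (nranks != 2) {
    if (myrank == 0) fprintf(stderr, "Run with exactly 2 ranks.\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  int otherrank = 1 - myrank;

  /* Payload we pretend to have packed (size differs per rank). */
  int sendsize = 1024 * (myrank + 1);
  char *senddata = calloc(sendsize, 1);

  /* Outgoing messages are tagged with our own rank's base tag; incoming
   * messages were tagged by the other rank, so use its base. */
  int sendbase = myrank * TAG_SHIFT;
  int recvbase = otherrank * TAG_SHIFT;

  MPI_Request reqs[3];
  int recvsize = 0;

  /* Start the count send, the payload send and the count receive. */
  MPI_Isend(&sendsize, 1, MPI_INT, otherrank, sendbase + TAG_COUNT,
            MPI_COMM_WORLD, &reqs[0]);
  MPI_Isend(senddata, sendsize, MPI_BYTE, otherrank, sendbase + TAG_CELLS,
            MPI_COMM_WORLD, &reqs[1]);
  MPI_Irecv(&recvsize, 1, MPI_INT, otherrank, recvbase + TAG_COUNT,
            MPI_COMM_WORLD, &reqs[2]);

  /* Only once the count has arrived do we know how big a buffer to post. */
  MPI_Wait(&reqs[2], MPI_STATUS_IGNORE);
  char *recvdata = calloc(recvsize, 1);
  MPI_Recv(recvdata, recvsize, MPI_BYTE, otherrank, recvbase + TAG_CELLS,
           MPI_COMM_WORLD, MPI_STATUS_IGNORE);

  /* Drain our own sends before finishing. */
  MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);
  printf("rank %d sent %d bytes, received %d bytes\n", myrank, sendsize,
         recvsize);

  free(senddata);
  free(recvdata);
  MPI_Finalize();
  return 0;
}

Posting the count receive up front and sizing the payload buffer only after waiting on it mirrors how the simulator (and SWIFT's proxy exchange) avoids pre-allocating worst-case buffers for the pcells.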