Commit 75a9d6f7 authored by Peter W. Draper

Revert "move calls to MPI_Irecv to the enqueue_mapper."

This reverts commit 00499932.
parent 00499932
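
For context, the change being reverted posted the nonblocking receives from scheduler_enqueue_mapper, i.e. at the point where a task's dependencies are released; this revert restores them to scheduler_enqueue. The pattern is the same in both places and is sketched below as a minimal, self-contained illustration: the toy_task struct and helper names are hypothetical stand-ins modelled on the task fields visible in the diff (buff, req), not SWIFT's actual scheduler API.

/* Minimal sketch of the post-then-poll irecv pattern touched by this
 * revert. Illustrative only; not SWIFT's actual code. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_task {
  void *buff;      /* receive buffer owned by the task */
  MPI_Request req; /* handle for the outstanding MPI_Irecv */
};

/* Post the receive as soon as the task becomes runnable... */
static void post_recv(struct toy_task *t, int count, int src, int tag) {
  t->buff = malloc(sizeof(int) * count);
  int err = MPI_Irecv(t->buff, count, MPI_INT, src, tag, MPI_COMM_WORLD,
                      &t->req);
  if (err != MPI_SUCCESS) {
    fprintf(stderr, "Failed to emit irecv.\n");
    MPI_Abort(MPI_COMM_WORLD, err);
  }
}

/* ...and later poll the request; the task only counts as complete once
 * the message has actually landed. */
static int recv_done(struct toy_task *t) {
  int flag = 0;
  MPI_Test(&t->req, &flag, MPI_STATUS_IGNORE);
  return flag;
}

Where the MPI_Irecv is posted decides how early the runtime can overlap communication with other task work, which is the trade-off this commit and its revert are exploring.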
@@ -1027,25 +1027,6 @@ void scheduler_enqueue_mapper(void *map_data, int num_elements,
   struct task *tasks = s->tasks;
   for (int ind = 0; ind < num_elements; ind++) {
     struct task *t = &tasks[tid[ind]];
-#ifdef WITH_MPI
-    /* If this is a recv task, enqueue the MPI call. */
-    if (t->type == task_type_recv) {
-      int err;
-      if (t->subtype == task_subtype_tend) {
-        t->buff = malloc(sizeof(int) * t->ci->pcell_size);
-        err = MPI_Irecv(t->buff, t->ci->pcell_size, MPI_INT, t->ci->nodeID,
-                        t->flags, MPI_COMM_WORLD, &t->req);
-      } else {
-        err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
-                        t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
-      }
-      if (err != MPI_SUCCESS) {
-        mpi_error(err, "Failed to emit irecv for particle data.");
-      }
-    }
-#endif
     if (atomic_dec(&t->wait) == 1 && !t->skip) {
       scheduler_enqueue(s, t);
     }
@@ -1179,17 +1160,17 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
       break;
     case task_type_recv:
 #ifdef WITH_MPI
-      // if (t->subtype == task_subtype_tend) {
-      //   t->buff = malloc(sizeof(int) * t->ci->pcell_size);
-      //   err = MPI_Irecv(t->buff, t->ci->pcell_size, MPI_INT, t->ci->nodeID,
-      //                   t->flags, MPI_COMM_WORLD, &t->req);
-      // } else {
-      //   err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
-      //                   t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
-      // }
-      // if (err != MPI_SUCCESS) {
-      //   mpi_error(err, "Failed to emit irecv for particle data.");
-      // }
+      if (t->subtype == task_subtype_tend) {
+        t->buff = malloc(sizeof(int) * t->ci->pcell_size);
+        err = MPI_Irecv(t->buff, t->ci->pcell_size, MPI_INT, t->ci->nodeID,
+                        t->flags, MPI_COMM_WORLD, &t->req);
+      } else {
+        err = MPI_Irecv(t->ci->parts, t->ci->count, part_mpi_type,
+                        t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
+      }
+      if (err != MPI_SUCCESS) {
+        mpi_error(err, "Failed to emit irecv for particle data.");
+      }
       // message( "receiving %i parts with tag=%i from %i to %i." ,
       //          t->ci->count , t->flags , t->ci->nodeID , s->nodeID );
       // fflush(stdout);
...