diff --git a/src/scheduler.c b/src/scheduler.c
index 0fb508346e486c1bb35dd51ab31d5e0bcd75be0c..384b8070c0039e24a7217eb98400d997ab40ad30 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1048,8 +1048,8 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
       break;
     case task_type_recv:
 #ifdef WITH_MPI
-      if ((err = MPI_Irecv(t->ci->parts, sizeof(struct part) * t->ci->count,
-                           MPI_BYTE, t->ci->nodeID, t->flags, MPI_COMM_WORLD,
+      if ((err = MPI_Irecv(t->ci->parts, t->ci->count,
+                           s->part_mpi_type, t->ci->nodeID, t->flags, MPI_COMM_WORLD,
                            &t->req)) != MPI_SUCCESS) {
         char buff[MPI_MAX_ERROR_STRING];
         int len;
@@ -1067,7 +1067,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
     case task_type_send:
 #ifdef WITH_MPI
-      if ((err = MPI_Isend(t->ci->parts, sizeof(struct part) * t->ci->count,
-                           MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD,
+      if ((err = MPI_Isend(t->ci->parts, t->ci->count,
+                           s->part_mpi_type, t->cj->nodeID, t->flags, MPI_COMM_WORLD,
                            &t->req)) != MPI_SUCCESS) {
         char buff[MPI_MAX_ERROR_STRING];
         int len;
@@ -1309,4 +1309,11 @@ void scheduler_init(struct scheduler *s, struct space *space, int nr_queues,
   s->size = 0;
   s->nr_tasks = 0;
   s->tasks_next = 0;
+
+/* Construct types for MPI communications */
+#ifdef WITH_MPI
+  part_create_mpi_type(&s->part_mpi_type);
+  xpart_create_mpi_type(&s->xpart_mpi_type);
+#endif
+
 }
diff --git a/src/scheduler.h b/src/scheduler.h
index b25ba843b29496439337c4876eb1b87f72367794..97b2e1a05d2cdea617c2067ab77fd30ea8dce2cc 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -20,6 +20,14 @@
 #ifndef SWIFT_SCHEDULER_H
 #define SWIFT_SCHEDULER_H
 
+/* Config parameters. */
+#include "../config.h"
+
+/* MPI headers. */
+#ifdef WITH_MPI
+#include <mpi.h>
+#endif
+
 /* Some standard headers. */
 #include <pthread.h>
 
@@ -89,6 +97,13 @@ struct scheduler {
 
   /* The node we are working on. */
   int nodeID;
+
+#ifdef WITH_MPI
+  /* MPI data types for the particle transfers */
+  MPI_Datatype part_mpi_type;
+  MPI_Datatype xpart_mpi_type;
+#endif
+
 };
 
 /* Function prototypes. */
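
Note: this change assumes that part_create_mpi_type() and xpart_create_mpi_type() are provided elsewhere (presumably alongside struct part, e.g. in src/part.c); they are not part of this diff. A minimal sketch of what such a constructor could look like, assuming every rank runs the same binary so that struct part has the same size and layout everywhere, in which case a byte-contiguous type of the right extent is sufficient:

#ifdef WITH_MPI
#include <mpi.h>
#include "part.h"

/* Hypothetical sketch: build and commit an MPI datatype whose extent
 * covers exactly one struct part, so that message counts can be
 * expressed in particles instead of bytes. */
void part_create_mpi_type(MPI_Datatype *part_type) {
  MPI_Type_contiguous(sizeof(struct part), MPI_BYTE, part_type);
  MPI_Type_commit(part_type);
}
#endif

With a committed per-particle type, the count argument of MPI_Irecv/MPI_Isend becomes the number of particles (t->ci->count) rather than a byte count, which is what both hunks in scheduler_enqueue() rely on. Types built this way should eventually be released with MPI_Type_free() in a matching tear-down path, which this diff does not add.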