diff --git a/src/engine.c b/src/engine.c
index c5eca0edbb3c384bd88d1ea62e66aed386066fae..2f592799d1c82fe83131c964cf55448cd08d1a2a 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -2214,8 +2214,7 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
       const int num_elements = p->cells_in[k]->pcell_size;
 
       /* Receive everything */
-      MPI_Irecv(&buffer_recv[this_recv],
-                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
+      MPI_Irecv(&buffer_recv[this_recv], num_elements, multipole_mpi_type,
                 p->cells_in[k]->nodeID, p->cells_in[k]->tag, MPI_COMM_WORLD,
                 &requests[this_request]);
 
@@ -2235,8 +2234,7 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
       /* Send everything (note the use of cells_in[0] to get the correct node
        * ID. */
-      MPI_Isend(&buffer_send[this_send],
-                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
+      MPI_Isend(&buffer_send[this_send], num_elements, multipole_mpi_type,
                 p->cells_in[0]->nodeID, p->cells_out[k]->tag, MPI_COMM_WORLD,
                 &requests[this_request]);
 
@@ -6391,7 +6389,8 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
 /* Construct types for MPI communications */
 #ifdef WITH_MPI
   part_create_mpi_types();
-  stats_create_MPI_type();
+  stats_create_mpi_type();
+  proxy_create_mpi_type();
   task_create_mpi_comms();
 #endif
 
diff --git a/src/proxy.c b/src/proxy.c
index 55d1f7d082fe883ce6dba83791017391c48cf032..775b2ee7ba005c80af64a5707830ddfe5f36c8c8 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -42,6 +42,9 @@
 #include "error.h"
 #include "space.h"
 
+/* MPI data type for the communications */
+MPI_Datatype pcell_mpi_type;
+
 /**
  * @brief Exchange tags between nodes.
  *
@@ -197,8 +200,7 @@ void proxy_cells_exchange_first(struct proxy *p) {
   }
 
   /* Send the pcell buffer. */
-  err = MPI_Isend(p->pcells_out, sizeof(struct pcell) * p->size_pcells_out,
-                  MPI_BYTE, p->nodeID,
+  err = MPI_Isend(p->pcells_out, p->size_pcells_out, pcell_mpi_type, p->nodeID,
                   p->mynodeID * proxy_tag_shift + proxy_tag_cells,
                   MPI_COMM_WORLD, &p->req_cells_out);
 
@@ -239,9 +241,8 @@ void proxy_cells_exchange_second(struct proxy *p) {
     error("Failed to allocate pcell_in buffer.");
 
   /* Receive the particle buffers. */
-  int err = MPI_Irecv(p->pcells_in, sizeof(struct pcell) * p->size_pcells_in,
-                      MPI_BYTE, p->nodeID,
-                      p->nodeID * proxy_tag_shift + proxy_tag_cells,
+  int err = MPI_Irecv(p->pcells_in, p->size_pcells_in, pcell_mpi_type,
+                      p->nodeID, p->nodeID * proxy_tag_shift + proxy_tag_cells,
                       MPI_COMM_WORLD, &p->req_cells_in);
   if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv part data.");
 
@@ -771,3 +772,15 @@ void proxy_init(struct proxy *p, int mynodeID, int nodeID) {
   }
   p->nr_sparts_out = 0;
 }
+
+/**
+ * @brief Registers the MPI types for the proxy cells.
+ */
+void proxy_create_mpi_type(void) {
+
+  if (MPI_Type_contiguous(sizeof(struct pcell) / sizeof(unsigned char),
+                          MPI_BYTE, &pcell_mpi_type) != MPI_SUCCESS ||
+      MPI_Type_commit(&pcell_mpi_type) != MPI_SUCCESS) {
+    error("Failed to create MPI type for pcells.");
+  }
+}
diff --git a/src/proxy.h b/src/proxy.h
index fbc3f1a163333146b26c937fae55a7fc4c1f12d7..2e3f350333d9e6fdb09161f852cf3a143c60e7ce 100644
--- a/src/proxy.h
+++ b/src/proxy.h
@@ -105,5 +105,6 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
                           struct space *s, int with_gravity);
 void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
                          struct space *s);
+void proxy_create_mpi_type(void);
 
 #endif /* SWIFT_PROXY_H */
diff --git a/src/scheduler.c b/src/scheduler.c
index a90a3f1b0fa2401a6bc128069486c679df535435..abfa6c549818a0e3c2b85a3cdd65e199557cf6e4 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1571,10 +1571,9 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
       } else if (t->subtype == task_subtype_multipole) {
         t->buff = (struct gravity_tensors *)malloc(
             sizeof(struct gravity_tensors) * t->ci->pcell_size);
-        err = MPI_Irecv(t->buff,
-                        sizeof(struct gravity_tensors) * t->ci->pcell_size,
-                        MPI_BYTE, t->ci->nodeID, t->flags,
-                        subtaskMPI_comms[t->subtype], &t->req);
+        err = MPI_Irecv(t->buff, t->ci->pcell_size, multipole_mpi_type,
+                        t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
+                        &t->req);
       } else {
         error("Unknown communication sub-type");
       }
@@ -1639,10 +1638,9 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
         t->buff = (struct gravity_tensors *)malloc(
             sizeof(struct gravity_tensors) * t->ci->pcell_size);
         cell_pack_multipoles(t->ci, (struct gravity_tensors *)t->buff);
-        err = MPI_Isend(t->buff,
-                        t->ci->pcell_size * sizeof(struct gravity_tensors),
-                        MPI_BYTE, t->cj->nodeID, t->flags,
-                        subtaskMPI_comms[t->subtype], &t->req);
+        err = MPI_Isend(t->buff, t->ci->pcell_size, multipole_mpi_type,
+                        t->cj->nodeID, t->flags, subtaskMPI_comms[t->subtype],
+                        &t->req);
       } else {
         error("Unknown communication sub-type");
       }
diff --git a/src/statistics.c b/src/statistics.c
index bdca6cfb4ef84bb64aa4776bfc600b0727e0d606..22ddc2e971cd6ce16c5310c7fcbf19927c549ceb 100644
--- a/src/statistics.c
+++ b/src/statistics.c
@@ -386,7 +386,7 @@ MPI_Op statistics_mpi_reduce_op;
 /**
  * @brief MPI reduce operator for #statistics structures.
  */
-void stats_add_MPI(void *in, void *inout, int *len, MPI_Datatype *datatype) {
+void stats_add_mpi(void *in, void *inout, int *len, MPI_Datatype *datatype) {
 
   for (int i = 0; i < *len; ++i)
     stats_add(&((struct statistics *)inout)[0],
@@ -396,7 +396,7 @@ void stats_add_MPI(void *in, void *inout, int *len, MPI_Datatype *datatype) {
 /**
  * @brief Registers MPI #statistics type and reduction function.
  */
-void stats_create_MPI_type(void) {
+void stats_create_mpi_type(void) {
 
   /* This is not the recommended way of doing this.
      One should define the structure field by field
@@ -411,6 +411,6 @@ void stats_create_MPI_type(void) {
   }
 
   /* Create the reduction operation */
-  MPI_Op_create(stats_add_MPI, 1, &statistics_mpi_reduce_op);
+  MPI_Op_create(stats_add_mpi, 1, &statistics_mpi_reduce_op);
 }
 #endif
diff --git a/src/statistics.h b/src/statistics.h
index adc9f5b6a24a093419b7dd644404a68ef736a685..b741eac3d406d767f5652234b9a16d82464cc456 100644
--- a/src/statistics.h
+++ b/src/statistics.h
@@ -76,8 +76,7 @@ void stats_finalize(struct statistics* s);
 extern MPI_Datatype statistics_mpi_type;
 extern MPI_Op statistics_mpi_reduce_op;
 
-void stats_add_MPI(void* in, void* out, int* len, MPI_Datatype* datatype);
-void stats_create_MPI_type(void);
+void stats_create_mpi_type(void);
 #endif
 
 #endif /* SWIFT_STATISTICS_H */
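Note on the pattern: every call site above replaces a byte count (num_elements * sizeof(struct ...) with MPI_BYTE) by an element count paired with a committed contiguous MPI datatype. Apart from reading more naturally, this keeps the count argument, which is a plain int in the MPI standard, a factor of sizeof(struct ...) smaller, so very large exchanges are less likely to overflow it. Below is a minimal, self-contained sketch of the same technique; struct payload, payload_mpi_type, and payload_create_mpi_type are hypothetical stand-ins for the pcell/gravity_tensors machinery, not SWIFT code.

/* Minimal sketch of the technique adopted in this patch. All names here
 * (struct payload, payload_mpi_type, ...) are hypothetical stand-ins, not
 * SWIFT code. Build with mpicc and run on two ranks, e.g. mpirun -n 2. */
#include <mpi.h>
#include <stdio.h>

struct payload {
  double pos[3];
  int id;
};

static MPI_Datatype payload_mpi_type;

/* Same shape as proxy_create_mpi_type(): one element of the committed type
 * covers exactly sizeof(struct payload) bytes of the send/receive buffer. */
static void payload_create_mpi_type(void) {
  if (MPI_Type_contiguous((int)sizeof(struct payload), MPI_BYTE,
                          &payload_mpi_type) != MPI_SUCCESS ||
      MPI_Type_commit(&payload_mpi_type) != MPI_SUCCESS) {
    fprintf(stderr, "Failed to create MPI type for payload.\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
}

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);
  payload_create_mpi_type();

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  struct payload buf[4] = {{{1., 2., 3.}, 42}};
  if (rank == 0) {
    /* The count is 4 elements, not 4 * sizeof(struct payload) bytes. */
    MPI_Send(buf, 4, payload_mpi_type, 1, 0, MPI_COMM_WORLD);
  } else if (rank == 1) {
    MPI_Recv(buf, 4, payload_mpi_type, 0, 0, MPI_COMM_WORLD,
             MPI_STATUS_IGNORE);
    printf("rank 1 received id=%d\n", buf[0].id);
  }

  MPI_Type_free(&payload_mpi_type);
  MPI_Finalize();
  return 0;
}

As with the pcell and statistics types above, a byte-wise contiguous type does not describe the struct field by field (the caveat the comment in stats_create_mpi_type points out), so it only round-trips correctly between ranks that share an identical struct layout, which holds for a homogeneous MPI run.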