Commit bc37999a authored by Matthieu Schaller

Free the MPI types and operations at exit time to unclog the memory sanitizer

parent a06eedec
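The changes below all follow the same clean-up discipline: every MPI_Type_commit, MPI_Op_create and MPI_Comm_dup made at start-up now gets a matching MPI_Type_free, MPI_Op_free or MPI_Comm_free, and the MPI_Finalize call is moved to after engine_clean() so that the handles are released while MPI is still initialised. A minimal sketch of that pattern (illustrative only, not SWIFT code; the payload struct and function names are made up):

#include <mpi.h>

struct payload { double a; long long b; };

static MPI_Datatype payload_mpi_type;
static MPI_Op payload_mpi_sum_op;

/* User-defined reduction: element-wise sum of two payload arrays. */
static void payload_sum(void *in, void *inout, int *len, MPI_Datatype *type) {
  const struct payload *x = (const struct payload *)in;
  struct payload *y = (struct payload *)inout;
  for (int i = 0; i < *len; ++i) {
    y[i].a += x[i].a;
    y[i].b += x[i].b;
  }
}

/* Called once after MPI_Init(): register the type and the reduction op. */
void payload_create_mpi_type(void) {
  MPI_Type_contiguous((int)sizeof(struct payload), MPI_BYTE, &payload_mpi_type);
  MPI_Type_commit(&payload_mpi_type);
  MPI_Op_create(payload_sum, /*commute=*/1, &payload_mpi_sum_op);
}

/* Called once just before MPI_Finalize(): release both handles so that a
 * leak checker / memory sanitizer does not report them at exit. */
void payload_free_mpi_type(void) {
  MPI_Type_free(&payload_mpi_type);
  MPI_Op_free(&payload_mpi_sum_op);
}

Freeing the handles before MPI_Finalize() is what "unclogs" the sanitizer output: the objects are otherwise still reachable when the process exits.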
@@ -1394,11 +1394,6 @@ int main(int argc, char *argv[]) {
#endif
}
#ifdef WITH_MPI
if ((res = MPI_Finalize()) != MPI_SUCCESS)
error("call to MPI_Finalize failed with error %i.", res);
#endif
/* Remove the stop file if used. Do this anyway, we could have missed the
* stop file if normal exit happened first. */
if (myrank == 0) force_stop = restart_stop_now(restart_dir, 1);
@@ -1420,6 +1415,11 @@ int main(int argc, char *argv[]) {
engine_clean(&e, /*fof=*/0);
free(params);
#ifdef WITH_MPI
if ((res = MPI_Finalize()) != MPI_SUCCESS)
error("call to MPI_Finalize failed with error %i.", res);
#endif
/* Say goodbye. */
if (myrank == 0) message("done. Bye.");
@@ -383,4 +383,9 @@ static void mpicollect_create_MPI_type(void) {
/* Create the reduction operation */
MPI_Op_create(mpicollectgroup1_reduce, 1, &mpicollectgroup1_reduce_op);
}
void mpicollect_free_MPI_type(void) {
MPI_Type_free(&mpicollectgroup1_type);
MPI_Op_free(&mpicollectgroup1_reduce_op);
}
#endif
@@ -77,5 +77,7 @@ void collectgroup1_init(
long long total_nr_cells, long long total_nr_tasks, float tasks_per_cell,
const struct star_formation_history sfh);
void collectgroup1_reduce(struct collectgroup1 *grp1);
#ifdef WITH_MPI
void mpicollect_free_MPI_type(void);
#endif
#endif /* SWIFT_COLLECTGROUP_H */
@@ -4693,6 +4693,14 @@ void engine_clean(struct engine *e, const int fof) {
}
free(e->proxy_ind);
free(e->proxies);
/* Free types */
part_free_mpi_types();
multipole_free_mpi_types();
stats_free_mpi_type();
proxy_free_mpi_type();
task_free_mpi_comms();
mpicollect_free_MPI_type();
#endif
/* Close files */
@@ -86,4 +86,8 @@ void multipole_create_mpi_types(void) {
MPI_Op_create(gravity_tensors_mpi_reduce, 1, &multipole_mpi_reduce_op);
}
void multipole_free_mpi_types(void) {
MPI_Type_free(&multipole_mpi_type);
MPI_Op_free(&multipole_mpi_reduce_op);
}
#endif
@@ -215,6 +215,7 @@ struct gravity_tensors {
extern MPI_Datatype multipole_mpi_type;
extern MPI_Op multipole_mpi_reduce_op;
void multipole_create_mpi_types(void);
void multipole_free_mpi_types(void);
#endif
/**
@@ -433,4 +433,13 @@ void part_create_mpi_types(void) {
error("Failed to create MPI type for bparts.");
}
}
void part_free_mpi_types(void) {
MPI_Type_free(&part_mpi_type);
MPI_Type_free(&xpart_mpi_type);
MPI_Type_free(&gpart_mpi_type);
MPI_Type_free(&spart_mpi_type);
MPI_Type_free(&bpart_mpi_type);
}
#endif
@@ -152,6 +152,7 @@ extern MPI_Datatype spart_mpi_type;
extern MPI_Datatype bpart_mpi_type;
void part_create_mpi_types(void);
void part_free_mpi_types(void);
#endif
#endif /* SWIFT_PART_H */
@@ -1021,3 +1021,11 @@ void proxy_create_mpi_type(void) {
error("SWIFT was not compiled with MPI support.");
#endif
}
void proxy_free_mpi_type(void) {
#ifdef WITH_MPI
MPI_Type_free(&pcell_mpi_type);
#else
error("SWIFT was not compiled with MPI support.");
#endif
}
@@ -113,5 +113,6 @@ void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
struct space *s);
void proxy_create_mpi_type(void);
void proxy_free_mpi_type(void);
#endif /* SWIFT_PROXY_H */
@@ -522,6 +522,10 @@ void scheduler_write_dependencies(struct scheduler *s, int verbose) {
/* Be clean */
free(task_dep);
#ifdef WITH_MPI
MPI_Type_free(&data_type);
MPI_Op_free(&sum);
#endif
if (verbose)
message("Printing task graph took %.3f %s.",
@@ -414,4 +414,9 @@ void stats_create_mpi_type(void) {
/* Create the reduction operation */
MPI_Op_create(stats_add_mpi, 1, &statistics_mpi_reduce_op);
}
void stats_free_mpi_type(void) {
MPI_Type_free(&statistics_mpi_type);
MPI_Op_free(&statistics_mpi_reduce_op);
}
#endif
@@ -77,6 +77,7 @@ extern MPI_Datatype statistics_mpi_type;
extern MPI_Op statistics_mpi_reduce_op;
void stats_create_mpi_type(void);
void stats_free_mpi_type(void);
#endif
#endif /* SWIFT_STATISTICS_H */
@@ -881,6 +881,14 @@ void task_create_mpi_comms(void) {
MPI_Comm_dup(MPI_COMM_WORLD, &subtaskMPI_comms[i]);
}
}
/**
 * @brief Free the global communicators created for each of the subtasks.
*/
void task_free_mpi_comms(void) {
for (int i = 0; i < task_subtype_count; i++) {
MPI_Comm_free(&subtaskMPI_comms[i]);
}
}
#endif
/**
@@ -243,5 +243,6 @@ void task_get_group_name(int type, int subtype, char *cluster);
#ifdef WITH_MPI
void task_create_mpi_comms(void);
void task_free_mpi_comms(void);
#endif
#endif /* SWIFT_TASK_H */