Commit efd4db84 authored by Matthieu Schaller

Time more sections of the parallel-io code when IO_SPEED_MEASUREMENT is defined.

Parents: 137ab111, 67ab894c
Merge request: !492 "More io timing"
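Every phase of write_output_parallel() touched below is instrumented with the same triplet: a barrier so all ranks start together, a tick read, and a rank-0 report after a second barrier. A minimal, self-contained sketch of that pattern (timed_phase is a hypothetical name, and MPI_Wtime() stands in for SWIFT's ticks-based clocks.h helpers):

#include <mpi.h>
#include <stdio.h>

/* Time one collective phase. The barrier before starting the clock puts
 * all ranks on the same start line; the barrier after the work makes the
 * reported figure the time of the slowest rank, which is what limits
 * parallel I/O. */
static void timed_phase(void (*phase)(void), const char *label, int rank) {
  MPI_Barrier(MPI_COMM_WORLD);
  const double tic = MPI_Wtime();

  phase(); /* the I/O step being measured */

  MPI_Barrier(MPI_COMM_WORLD);
  if (rank == 0)
    printf("%s took %.3f ms.\n", label, 1000. * (MPI_Wtime() - tic));
}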
@@ -53,12 +53,12 @@
 #include "units.h"
 #include "xmf.h"
 
+/* Are we timing the i/o? */
+//#define IO_SPEED_MEASUREMENT
+
 /* The current limit of ROMIO (the underlying MPI-IO layer) is 2GB */
 #define HDF5_PARALLEL_IO_MAX_BYTES 2000000000LL
 
-/* Are we timing the i/o? */
-//#define IO_SPEED_MEASUREMENT
-
 /**
  * @brief Reads a chunk of data from an open HDF5 dataset
  *
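This first hunk only moves the compile-time switch up, next to the other I/O configuration; the define stays commented out, so nothing is measured by default. Enabling the measurements is a one-line edit before building:

/* Are we timing the i/o? */
#define IO_SPEED_MEASUREMENT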
@@ -879,6 +879,14 @@ void write_output_parallel(struct engine* e, const char* baseName,
   h_err = H5Pset_mdc_config(plist_id, &mdc_config);
   if (h_err < 0) error("Error setting the MDC config");
 
+  /* Use parallel meta-data writes */
+#if H5_VERSION_GE(1, 10, 0)
+  h_err = H5Pset_all_coll_metadata_ops(plist_id, 1);
+  if (h_err < 0) error("Error setting collective meta-data on all ops");
+  h_err = H5Pset_coll_metadata_write(plist_id, 1);
+  if (h_err < 0) error("Error setting collective meta-data writes");
+#endif
+
   /* Open HDF5 file with the chosen parameters */
   h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
   if (h_file < 0) {
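H5Pset_all_coll_metadata_ops() and H5Pset_coll_metadata_write() only exist from HDF5 1.10 onwards, hence the version guard. Out of context, the file-access property-list setup amounts to roughly the following (open_parallel_file is a hypothetical helper, error handling elided, and a parallel HDF5 build is assumed):

#include <hdf5.h>
#include <mpi.h>

hid_t open_parallel_file(const char *fname, MPI_Comm comm) {
  /* File-access property list selecting the MPI-IO driver. */
  hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);

#if H5_VERSION_GE(1, 10, 0)
  /* Perform metadata reads collectively on all operations and flush
   * metadata writes collectively, instead of serialising them on rank 0. */
  H5Pset_all_coll_metadata_ops(fapl, 1);
  H5Pset_coll_metadata_write(fapl, 1);
#endif

  hid_t file = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
  H5Pclose(fapl);
  return file;
}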
@@ -904,6 +912,11 @@ void write_output_parallel(struct engine* e, const char* baseName,
    * specific output */
   if (mpi_rank == 0) xmf_write_outputheader(xmfFile, fileName, e->time);
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  ticks tic = getticks();
+#endif
+
   /* Open header to write simulation properties */
   /* message("Writing runtime parameters..."); */
   h_grp =
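The ticks type and getticks() used here come from SWIFT's cycle-counter header (cycle.h); clocks_from_ticks() converts an elapsed tick count to the wall-clock unit named by clocks_getunit(), typically milliseconds.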
@@ -996,6 +1009,15 @@ void write_output_parallel(struct engine* e, const char* baseName,
   /* Print the system of Units used internally */
   io_write_unit_system(h_file, internal_units, "InternalCodeUnits");
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Writing HDF5 header took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+#endif
+
   /* Tell the user if a conversion will be needed */
   if (e->verbose && mpi_rank == 0) {
     if (units_are_equal(snapshot_units, internal_units)) {
@@ -1102,24 +1124,75 @@ void write_output_parallel(struct engine* e, const char* baseName,
         dmparts = 0;
       }
 
+#ifdef IO_SPEED_MEASUREMENT
+    MPI_Barrier(MPI_COMM_WORLD);
+    tic = getticks();
+#endif
+
     /* Close particle group */
     H5Gclose(h_grp);
 
+#ifdef IO_SPEED_MEASUREMENT
+    MPI_Barrier(MPI_COMM_WORLD);
+    if (engine_rank == 0)
+      message("Closing particle group took %.3f %s.",
+              clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+    tic = getticks();
+#endif
+
     /* Close this particle group in the XMF file as well */
     if (mpi_rank == 0) xmf_write_groupfooter(xmfFile, (enum part_type)ptype);
 
+#ifdef IO_SPEED_MEASUREMENT
+    MPI_Barrier(MPI_COMM_WORLD);
+    if (engine_rank == 0)
+      message("Writing XMF group footer took %.3f %s.",
+              clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
+
   }
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  tic = getticks();
+#endif
+
   /* Write LXMF file descriptor */
   if (mpi_rank == 0) xmf_write_outputfooter(xmfFile, outputCount, e->time);
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Writing XMF output footer took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+#endif
+
   /* message("Done writing particles..."); */
 
   /* Close property descriptor */
   H5Pclose(plist_id);
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Closing property descriptor took %.3f %s.",
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+
+  tic = getticks();
+#endif
+
   /* Close file */
   H5Fclose(h_file);
 
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Closing file took %.3f %s.", clocks_from_ticks(getticks() - tic),
+            clocks_getunit());
+#endif
+
   ++outputCount;
 }
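With this commit the barrier/tick/message triplet appears around every phase of the writer. A possible follow-up, not part of this commit, would be to fold the triplet into a macro pair so each measured section is a one-liner; a sketch, with IO_TIC/IO_TOC as hypothetical names and tic, engine_rank, message(), getticks(), clocks_from_ticks() and clocks_getunit() being the SWIFT internals already used above:

#ifdef IO_SPEED_MEASUREMENT
/* Synchronise the ranks and restart the shared timer. */
#define IO_TIC()               \
  MPI_Barrier(MPI_COMM_WORLD); \
  tic = getticks();
/* Wait for the slowest rank, then report the elapsed time on rank 0. */
#define IO_TOC(what)                                                \
  MPI_Barrier(MPI_COMM_WORLD);                                      \
  if (engine_rank == 0)                                             \
    message(what " took %.3f %s.",                                  \
            clocks_from_ticks(getticks() - tic), clocks_getunit());
#else
#define IO_TIC()
#define IO_TOC(what)
#endif

/* Usage, mirroring the diff above:
 *   IO_TIC();
 *   H5Fclose(h_file);
 *   IO_TOC("Closing file");
 */

Wrapping each macro body in do { ... } while (0) would additionally make them safe inside unbraced if/else branches.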