Commit 8a35b6f7 authored by Matthieu Schaller

Move the parallel i/o speed measurement code from comments to #ifdef sections.

parent 937e029b
Merge request !460: Improvements to i/o and parallel-i/o
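The change replaces blocks of commented-out timing code with the same code guarded by an IO_SPEED_MEASUREMENT preprocessor flag, so the instrumentation can be switched on by uncommenting a single #define (or, presumably, by passing -DIO_SPEED_MEASUREMENT in CFLAGS) instead of hand-editing every call site. Below is a minimal, self-contained sketch of the pattern, before the diff itself; it uses MPI_Wtime() in place of SWIFT's getticks()/clocks_from_ticks() helpers, and the function and dataset names are placeholders, not SWIFT API.

#include <mpi.h>
#include <stdio.h>

/* Uncomment to enable the i/o timing instrumentation. */
/* #define IO_SPEED_MEASUREMENT */

/* Placeholder for a collective write of `bytes` bytes under `name`. */
static void write_chunk(const char* name, long bytes) {

#ifdef IO_SPEED_MEASUREMENT
  /* Barrier so the timer starts only once every rank is ready. */
  MPI_Barrier(MPI_COMM_WORLD);
  const double tic = MPI_Wtime();
#endif

  /* ... the actual collective write (e.g. H5Dwrite) would go here ... */

#ifdef IO_SPEED_MEASUREMENT
  /* Barrier so we measure the slowest rank, i.e. the collective cost. */
  MPI_Barrier(MPI_COMM_WORLD);
  const double elapsed = MPI_Wtime() - tic;

  /* Sum the volume written across all ranks and report on rank 0 only. */
  long total = 0;
  MPI_Reduce(&bytes, &total, 1, MPI_LONG, MPI_SUM, 0, MPI_COMM_WORLD);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0)
    printf("Write of '%s' (%ld MB) took %.3f s (speed = %.1f MB/s).\n", name,
           total / (1024L * 1024L), elapsed,
           total / (1024. * 1024.) / elapsed);
#else
  (void)name; /* Silence unused-parameter warnings when timing is off. */
  (void)bytes;
#endif
}

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  write_chunk("Coordinates", 64L * 1024L * 1024L);
  MPI_Finalize();
  return 0;
}

The barriers on both sides of the timed region mean the reported time is that of the slowest rank, which is the relevant figure for a collective parallel write.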
@@ -51,6 +51,9 @@
 #include "units.h"
 #include "xmf.h"
 
+/* Are we timing the i/o? */
+//#define IO_SPEED_MEASUREMENT
+
 /* The current limit of ROMIO (the underlying MPI-IO layer) is 2GB */
 #define HDF5_PARALLEL_IO_MAX_BYTES 2000000000LL
@@ -266,16 +269,20 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
                      num_elements * typeSize) != 0)
     error("Unable to allocate temporary i/o buffer");
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* ticks tic = getticks(); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  ticks tic = getticks();
+#endif
 
   /* Copy the particle data to the temporary buffer */
   io_copy_temp_buffer(temp, e, props, N, internal_units, snapshot_units);
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* if(engine_rank == 0) */
-  /*   message( "Copying for '%s' took %.3f %s." , props.name, */
-  /*           clocks_from_ticks(getticks() - tic), clocks_getunit()); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Copying for '%s' took %.3f %s.", props.name,
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
 
   /* Create data space */
   const hid_t h_memspace = H5Screate(H5S_SIMPLE);
@@ -318,12 +325,13 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
   }
 
-  /* message("Writing %lld '%s', %zd elements = %zd bytes (int=%d) at offset
-   * %zd", */
-  /*         N, props.name, N * props.dimension, N * props.dimension * typeSize, */
-  /*         (int)(N * props.dimension * typeSize), offset); */
+  /* message("Writing %lld '%s', %zd elements = %zd bytes (int=%d) at offset
+   * %zd", N, props.name, N * props.dimension, N * props.dimension * typeSize,
+   * (int)(N * props.dimension * typeSize), offset); */
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* tic = getticks(); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  tic = getticks();
+#endif
 
   /* Write temporary buffer to HDF5 dataspace */
   h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace,
@@ -332,15 +340,17 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
     error("Error while writing data array '%s'.", props.name);
   }
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* ticks toc = getticks(); */
-  /* float ms = clocks_from_ticks(toc - tic); */
-  /* int megaBytes = N * props.dimension * typeSize / (1024 * 1024); */
-  /* int total = 0; */
-  /* MPI_Reduce(&megaBytes, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); */
-  /* if (engine_rank == 0) */
-  /*   message("H5Dwrite for '%s' (%d MB) took %.3f %s (speed = %f MB/s).", */
-  /*           props.name, total, ms, clocks_getunit(), total / (ms / 1000.)); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  ticks toc = getticks();
+  float ms = clocks_from_ticks(toc - tic);
+  int megaBytes = N * props.dimension * typeSize / (1024 * 1024);
+  int total = 0;
+  MPI_Reduce(&megaBytes, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("H5Dwrite for '%s' (%d MB) took %.3f %s (speed = %f MB/s).",
+            props.name, total, ms, clocks_getunit(), total / (ms / 1000.));
+#endif
 
   /* Free and close everything */
  free(temp);
@@ -372,7 +382,10 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
                 const struct unit_system* snapshot_units) {
 
   const size_t typeSize = io_sizeof_type(props.type);
-  /* const ticks tic = getticks(); */
+
+#ifdef IO_SPEED_MEASUREMENT
+  const ticks tic = getticks();
+#endif
 
   /* Work out properties of the array in the file */
   int rank;
@@ -461,7 +474,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
   MPI_Allreduce(MPI_IN_PLACE, &redo, 1, MPI_SIGNED_CHAR, MPI_MAX,
                 MPI_COMM_WORLD);
 
-  if (redo /* && e->verbose*/ && mpi_rank == 0)
+  if (redo && e->verbose && mpi_rank == 0)
     message("Need to redo one iteration for array '%s'", props.name);
   }
@@ -487,10 +500,12 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
   H5Dclose(h_data);
   H5Pclose(h_plist_id);
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* if(engine_rank == 0) */
-  /*   message( "'%s' took %.3f %s." , props.name, */
-  /*           clocks_from_ticks(getticks() - tic), clocks_getunit()); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("'%s' took %.3f %s.", props.name,
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
 }
 
 /**
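Two remarks on the numbers involved. The HDF5_PARALLEL_IO_MAX_BYTES limit of 2000000000 bytes sits just below 2^31 - 1 = 2147483647, the largest value a 32-bit signed integer can hold, which is presumably where ROMIO's 2GB per-write ceiling comes from; the round figure leaves a little headroom. And in the new H5Dwrite timing block, each rank truncates its own contribution to whole megabytes (N * props.dimension * typeSize / (1024 * 1024)), the per-rank figures are summed onto rank 0 with MPI_Reduce, and the speed is computed as total / (ms / 1000.), which assumes clocks_from_ticks() reports milliseconds: for instance, 512 MB written in 250 ms gives 512 / 0.25 = 2048 MB/s.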