diff --git a/src/hydro/Default/hydro_io.h b/src/hydro/Default/hydro_io.h
index 5908ebc6dbb11714749f5586e136925789dda333..705b6886e2658341b3c896c2d8cf6b63a9fbabb5 100644
--- a/src/hydro/Default/hydro_io.h
+++ b/src/hydro/Default/hydro_io.h
@@ -17,48 +17,80 @@
  *
  ******************************************************************************/
-
+/**
+ * @brief Reads the different particles from the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to read the arrays.
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ *
+ */
 __attribute__((always_inline))
-INLINE static void hydro_read_particles(hid_t h_grp, int N, struct part* parts) {
+INLINE static void hydro_read_particles(hid_t h_grp, int N, long long N_total,
+                                        long long offset, struct part* parts) {
 
   /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, x, COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, v, COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, mass, COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, h, COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, u, COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, id, COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, a, OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, rho, OPTIONAL);
+  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total,
+            offset, x, COMPULSORY);
+  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total,
+            offset, v, COMPULSORY);
+  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total,
+            offset, mass, COMPULSORY);
+  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total,
+            offset, h, COMPULSORY);
+  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total,
+            offset, u, COMPULSORY);
+  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total,
+            offset, id, COMPULSORY);
+  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total,
+            offset, a, OPTIONAL);
+  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total,
+            offset, rho, OPTIONAL);
 }
-
-
+/**
+ * @brief Writes the different particles to the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to write the arrays.
+ * @param fileName The name of the file (unused in MPI mode).
+ * @param xmfFile The XMF file to write to (unused in MPI mode).
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param mpi_rank The MPI rank of this node (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ * @param us The unit system to use
+ *
+ */
 __attribute__((always_inline))
 INLINE static void hydro_write_particles(hid_t h_grp, char* fileName,
                                          FILE* xmfFile,
-                                         int N, struct part* parts,
+                                         int N, long long N_total, int mpi_rank,
+                                         long long offset, struct part* parts,
                                          struct UnitSystem* us) {
 
   /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, x,
-             us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, v, us,
-             UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, mass, us,
-             UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, h,
-             us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
+             N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
+  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
+  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH);
   writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
-             u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+             N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
   writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
-             id, us, UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, a,
-             us, UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, rho, us,
-             UNIT_CONV_DENSITY);
+             N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
+  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
+  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
 }
+
diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h
index 69f902149bfb2714b72cedd08af2555798dcda8b..e6ced1b8aca911b8fd7742e9296cece5d237c328 100644
--- a/src/hydro/Gadget2/hydro_io.h
+++ b/src/hydro/Gadget2/hydro_io.h
@@ -17,48 +17,78 @@
  *
  ******************************************************************************/
-
+/**
+ * @brief Reads the different particles from the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to read the arrays.
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ *
+ */
 __attribute__((always_inline))
-INLINE static void hydro_read_particles(hid_t h_grp, int N, struct part* parts) {
+INLINE static void hydro_read_particles(hid_t h_grp, int N, long long N_total,
+                                        long long offset, struct part* parts) {
 
   /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, x, COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, v, COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, mass, COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, h, COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, entropy, COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, id, COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, a, OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, rho, OPTIONAL);
+  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total,
+            offset, x, COMPULSORY);
+  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total,
+            offset, v, COMPULSORY);
+  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total,
+            offset, mass, COMPULSORY);
+  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total,
+            offset, h, COMPULSORY);
+  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total,
+            offset, entropy, COMPULSORY);
+  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total,
+            offset, id, COMPULSORY);
+  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total,
+            offset, a, OPTIONAL);
+  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total,
+            offset, rho, OPTIONAL);
 }
-
-
-
+/**
+ * @brief Writes the different particles to the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to write the arrays.
+ * @param fileName The name of the file (unused in MPI mode).
+ * @param xmfFile The XMF file to write to (unused in MPI mode).
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode) + * @param mpi_rank The MPI rank of this node (only used in MPI mode) + * @param offset The offset of the particles for this MPI rank (only used in MPI mode) + * @param parts The particle array + * @param us The unit system to use + * + */ __attribute__((always_inline)) INLINE static void hydro_write_particles(hid_t h_grp, char* fileName, FILE* xmfFile, - int N, struct part* parts, + int N, long long N_total, int mpi_rank, + long long offset, struct part* parts, struct UnitSystem* us) { /* Write arrays */ - writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, x, - us, UNIT_CONV_LENGTH); - writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, v, us, - UNIT_CONV_SPEED); - writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, mass, us, - UNIT_CONV_MASS); - writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, h, - us, UNIT_CONV_LENGTH); + writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, + N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH); + writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, + N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED); + writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, + N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS); + writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, + N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH); writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts, - entropy, us, UNIT_CONV_ENTROPY_PER_UNIT_MASS); + N_total, mpi_rank, offset, entropy, us, UNIT_CONV_ENTROPY_PER_UNIT_MASS); writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts, - id, us, UNIT_CONV_NO_UNITS); - writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, a, - us, UNIT_CONV_ACCELERATION); - writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, rho, us, - UNIT_CONV_DENSITY); + N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS); + writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, + N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION); + writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, + N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY); } diff --git a/src/parallel_io.c b/src/parallel_io.c index 3d006a65801e7b4c6ba941959dec5a548cbb8231..7ba4400dcb90a930d9ebecf18d91de7cc48e696f 100644 --- a/src/parallel_io.c +++ b/src/parallel_io.c @@ -151,147 +151,6 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, H5Dclose(h_data); } -/** - * @brief A helper macro to call the readArrayBackEnd function more easily. - * - * @param grp The group from which to read. - * @param name The name of the array to read. - * @param type The #DATA_TYPE of the attribute. - * @param N The number of particles on this MPI task. 
- * @param dim The dimension of the data (1 for scalar, 3 for vector) - * @param part The array of particles to fill - * @param N_total Total number of particles - * @param offset Offset in the array where this task starts reading - * @param field The name of the field (C code name as defined in part.h) to fill - * @param importance Is the data compulsory or not - * - */ -#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \ - importance) \ - readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \ - (char*)(&(part[0]).field), importance) - -/** - * @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel - * - * @param fileName The file to read. - * @param dim (output) The dimension of the volume read from the file. - * @param parts (output) The array of #part read from the file. - * @param N (output) The number of particles read from the file. - * @param periodic (output) 1 if the volume is periodic, 0 if not. - * - * Opens the HDF5 file fileName and reads the particles contained - * in the parts array. N is the returned number of particles found - * in the file. - * - * @warning Can not read snapshot distributed over more than 1 file !!! - * @todo Read snapshots distributed in more than one file. - * - * Calls #error() if an error occurs. - * - */ -void read_ic_parallel(char* fileName, double dim[3], struct part** parts, - int* N, int* periodic, int mpi_rank, int mpi_size, - MPI_Comm comm, MPI_Info info) { - hid_t h_file = 0, h_grp = 0; - double boxSize[3] = { - 0.0, -1.0, -1.0}; /* GADGET has only cubic boxes (in cosmological mode) */ - int numParticles[6] = { - 0}; /* GADGET has 6 particle types. We only keep the type 0*/ - int numParticles_highWord[6] = {0}; - long long offset = 0; - long long N_total = 0; - - /* Open file */ - /* message("Opening file '%s' as IC.", fileName); */ - hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(h_plist_id, comm, info); - h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id); - if (h_file < 0) { - error("Error while opening file '%s'.", fileName); - } - - /* Open header to read simulation properties */ - /* message("Reading runtime parameters..."); */ - h_grp = H5Gopen1(h_file, "/RuntimePars"); - if (h_grp < 0) error("Error while opening runtime parameters\n"); - - /* Read the relevant information */ - readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); - - /* Close runtime parameters */ - H5Gclose(h_grp); - - /* Open header to read simulation properties */ - /* message("Reading file header..."); */ - h_grp = H5Gopen1(h_file, "/Header"); - if (h_grp < 0) error("Error while opening file header\n"); - - /* Read the relevant information and print status */ - readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); - readAttribute(h_grp, "NumPart_Total", UINT, numParticles); - readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord); - - N_total = ((long long)numParticles[0]) + - ((long long)numParticles_highWord[0] << 32); - dim[0] = boxSize[0]; - dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1]; - dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2]; - - /* message("Found %d particles in a %speriodic box of size [%f %f %f].", */ - /* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */ - - /* Divide the particles among the tasks. 
*/ - offset = mpi_rank * N_total / mpi_size; - *N = (mpi_rank + 1) * N_total / mpi_size - offset; - - /* Close header */ - H5Gclose(h_grp); - - /* Allocate memory to store particles */ - if (posix_memalign((void*)parts, part_align, *N * sizeof(struct part)) != 0) - error("Error while allocating memory for particles"); - bzero(*parts, *N * sizeof(struct part)); - - /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / - * (1024.*1024.)); */ - - /* Open SPH particles group */ - /* message("Reading particle arrays..."); */ - h_grp = H5Gopen1(h_file, "/PartType0"); - if (h_grp < 0) error("Error while opening particle group.\n"); - - /* Read arrays */ - readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset, x, - COMPULSORY); - readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, N_total, offset, v, - COMPULSORY); - readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, N_total, offset, mass, - COMPULSORY); - readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, N_total, offset, h, - COMPULSORY); - readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, N_total, offset, - entropy, COMPULSORY); - readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id, - COMPULSORY); - /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */ - /* OPTIONAL); */ - readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a, - OPTIONAL); - readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho, - OPTIONAL); - - /* Close particle group */ - H5Gclose(h_grp); - - /* Close property handler */ - H5Pclose(h_plist_id); - - /* Close file */ - H5Fclose(h_file); - - /* message("Done Reading particles..."); */ -} /*----------------------------------------------------------------------------- * Routines writing an output file @@ -428,6 +287,29 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, H5Sclose(h_filespace); } + +/** + * @brief A helper macro to call the readArrayBackEnd function more easily. + * + * @param grp The group from which to read. + * @param name The name of the array to read. + * @param type The #DATA_TYPE of the attribute. + * @param N The number of particles on this MPI task. + * @param dim The dimension of the data (1 for scalar, 3 for vector) + * @param part The array of particles to fill + * @param N_total Total number of particles + * @param offset Offset in the array where this task starts reading + * @param field The name of the field (C code name as defined in part.h) to fill + * @param importance Is the data compulsory or not + * + */ +#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \ + importance) \ + readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \ + (char*)(&(part[0]).field), importance) + + + /** * @brief A helper macro to call the writeArrayBackEnd function more easily. * @@ -455,6 +337,121 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, mpi_rank, offset, (char*)(&(part[0]).field), us, \ convFactor) +/* Import the right hydro definition */ +#ifdef LEGACY_GADGET2_SPH +#include "./hydro/Gadget2/hydro_io.h" +#else +#include "./hydro/Default/hydro_io.h" +#endif + + +/** + * @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel + * + * @param fileName The file to read. + * @param dim (output) The dimension of the volume read from the file. + * @param parts (output) The array of #part read from the file. + * @param N (output) The number of particles read from the file. 
+ * @param periodic (output) 1 if the volume is periodic, 0 if not. + * + * Opens the HDF5 file fileName and reads the particles contained + * in the parts array. N is the returned number of particles found + * in the file. + * + * @warning Can not read snapshot distributed over more than 1 file !!! + * @todo Read snapshots distributed in more than one file. + * + * Calls #error() if an error occurs. + * + */ +void read_ic_parallel(char* fileName, double dim[3], struct part** parts, + int* N, int* periodic, int mpi_rank, int mpi_size, + MPI_Comm comm, MPI_Info info) { + hid_t h_file = 0, h_grp = 0; + double boxSize[3] = { + 0.0, -1.0, -1.0}; /* GADGET has only cubic boxes (in cosmological mode) */ + int numParticles[6] = { + 0}; /* GADGET has 6 particle types. We only keep the type 0*/ + int numParticles_highWord[6] = {0}; + long long offset = 0; + long long N_total = 0; + + /* Open file */ + /* message("Opening file '%s' as IC.", fileName); */ + hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(h_plist_id, comm, info); + h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id); + if (h_file < 0) { + error("Error while opening file '%s'.", fileName); + } + + /* Open header to read simulation properties */ + /* message("Reading runtime parameters..."); */ + h_grp = H5Gopen1(h_file, "/RuntimePars"); + if (h_grp < 0) error("Error while opening runtime parameters\n"); + + /* Read the relevant information */ + readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); + + /* Close runtime parameters */ + H5Gclose(h_grp); + + /* Open header to read simulation properties */ + /* message("Reading file header..."); */ + h_grp = H5Gopen1(h_file, "/Header"); + if (h_grp < 0) error("Error while opening file header\n"); + + /* Read the relevant information and print status */ + readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); + readAttribute(h_grp, "NumPart_Total", UINT, numParticles); + readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord); + + N_total = ((long long)numParticles[0]) + + ((long long)numParticles_highWord[0] << 32); + dim[0] = boxSize[0]; + dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1]; + dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2]; + + /* message("Found %d particles in a %speriodic box of size [%f %f %f].", */ + /* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */ + + /* Divide the particles among the tasks. 
*/ + offset = mpi_rank * N_total / mpi_size; + *N = (mpi_rank + 1) * N_total / mpi_size - offset; + + /* Close header */ + H5Gclose(h_grp); + + /* Allocate memory to store particles */ + if (posix_memalign((void*)parts, part_align, *N * sizeof(struct part)) != 0) + error("Error while allocating memory for particles"); + bzero(*parts, *N * sizeof(struct part)); + + /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / + * (1024.*1024.)); */ + + /* Open SPH particles group */ + /* message("Reading particle arrays..."); */ + h_grp = H5Gopen1(h_file, "/PartType0"); + if (h_grp < 0) error("Error while opening particle group.\n"); + + /* Read particle fields into the particle structure */ + hydro_read_particles(h_grp, *N, N_total, offset, *parts); + + /* Close particle group */ + H5Gclose(h_grp); + + /* Close property handler */ + H5Pclose(h_plist_id); + + /* Close file */ + H5Fclose(h_file); + + /* message("Done Reading particles..."); */ +} + + + /** * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor * @@ -570,27 +567,8 @@ void write_output_parallel(struct engine* e, struct UnitSystem* us, h_grp = H5Gcreate1(h_file, "/PartType0", 0); if (h_grp < 0) error("Error while creating particle group.\n"); - /* Write arrays */ - writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, - N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH); - writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, - N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED); - writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, N_total, - mpi_rank, offset, mass, us, UNIT_CONV_MASS); - writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, - N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH); - writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts, - N_total, mpi_rank, offset, entropy, us, - UNIT_CONV_ENERGY_PER_UNIT_MASS); - writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts, - N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS); - /* writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, - * N_total, */ - /* mpi_rank, offset, dt, us, UNIT_CONV_TIME); */ - writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, - N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION); - writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total, - mpi_rank, offset, rho, us, UNIT_CONV_DENSITY); + /* Write particle fields from the particle structure */ + hydro_write_particles(h_grp, fileName, xmfFile, N, N_total, mpi_rank, offset, parts, us); /* Close particle group */ H5Gclose(h_grp); diff --git a/src/serial_io.c b/src/serial_io.c index 806833213c56c6720d048da4aa7009a74f3b41ee..00a2e34d7a077881a334652b8afc9334853539b7 100644 --- a/src/serial_io.c +++ b/src/serial_io.c @@ -168,173 +168,6 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, H5Dclose(h_data); } -/** - * @brief A helper macro to call the readArrayBackEnd function more easily. - * - * @param grp The group from which to read. - * @param name The name of the array to read. - * @param type The #DATA_TYPE of the attribute. - * @param N The number of particles. 
- * @param dim The dimension of the data (1 for scalar, 3 for vector) - * @param part The array of particles to fill - * @param N_total Total number of particles - * @param offset Offset in the array where this task starts reading - * @param field The name of the field (C code name as defined in part.h) to fill - * @param importance Is the data compulsory or not - * - */ -#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \ - importance) \ - readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \ - (char*)(&(part[0]).field), importance) - -/** - * @brief Reads an HDF5 initial condition file (GADGET-3 type) - * - * @param fileName The file to read. - * @param dim (output) The dimension of the volume read from the file. - * @param parts (output) The array of #part read from the file. - * @param N (output) The number of particles read from the file. - * @param periodic (output) 1 if the volume is periodic, 0 if not. - * - * Opens the HDF5 file fileName and reads the particles contained - * in the parts array. N is the returned number of particles found - * in the file. - * - * @warning Can not read snapshot distributed over more than 1 file !!! - * @todo Read snapshots distributed in more than one file. - * - * Calls #error() if an error occurs. - * - */ -void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N, - int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, - MPI_Info info) { - hid_t h_file = 0, h_grp = 0; - double boxSize[3] = {0.0, -1.0, -1.0}; - /* GADGET has only cubic boxes (in cosmological mode) */ - int numParticles[6] = {0}; - /* GADGET has 6 particle types. We only keep the type 0*/ - int numParticles_highWord[6] = {0}; - long long offset = 0; - long long N_total = 0; - int rank; - - /* First read some information about the content */ - if (mpi_rank == 0) { - - /* Open file */ - /* message("Opening file '%s' as IC.", fileName); */ - h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT); - if (h_file < 0) - error("Error while opening file '%s' for initial read.", fileName); - - /* Open header to read simulation properties */ - /* message("Reading runtime parameters..."); */ - h_grp = H5Gopen1(h_file, "/RuntimePars"); - if (h_grp < 0) error("Error while opening runtime parameters\n"); - - /* Read the relevant information */ - readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); - - /* Close runtime parameters */ - H5Gclose(h_grp); - - /* Open header to read simulation properties */ - /* message("Reading file header..."); */ - h_grp = H5Gopen1(h_file, "/Header"); - if (h_grp < 0) error("Error while opening file header\n"); - - /* Read the relevant information and print status */ - readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); - readAttribute(h_grp, "NumPart_Total", UINT, numParticles); - readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord); - - N_total = ((long long)numParticles[0]) + - ((long long)numParticles_highWord[0] << 32); - dim[0] = boxSize[0]; - dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1]; - dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2]; - - /* message("Found %lld particles in a %speriodic box of size [%f %f %f].", - */ - /* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */ - - fflush(stdout); - - /* Close header */ - H5Gclose(h_grp); - - /* Close file */ - H5Fclose(h_file); - } - - /* Now need to broadcast that information to all ranks. 
*/ - MPI_Bcast(periodic, 1, MPI_INT, 0, comm); - MPI_Bcast(&N_total, 1, MPI_LONG_LONG, 0, comm); - MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm); - - /* Divide the particles among the tasks. */ - offset = mpi_rank * N_total / mpi_size; - *N = (mpi_rank + 1) * N_total / mpi_size - offset; - - /* Allocate memory to store particles */ - if (posix_memalign((void*)parts, part_align, (*N) * sizeof(struct part)) != 0) - error("Error while allocating memory for particles"); - bzero(*parts, *N * sizeof(struct part)); - /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */ - /* (1024.*1024.)); */ - - /* Now loop over ranks and read the data */ - for (rank = 0; rank < mpi_size; ++rank) { - - /* Is it this rank's turn to read ? */ - if (rank == mpi_rank) { - - h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT); - if (h_file < 0) - error("Error while opening file '%s' on rank %d.", fileName, mpi_rank); - - /* Open SPH particles group */ - /* message("Reading particle arrays..."); */ - h_grp = H5Gopen1(h_file, "/PartType0"); - if (h_grp < 0) - error("Error while opening particle group on rank %d.\n", mpi_rank); - - /* Read arrays */ - readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset, x, - COMPULSORY); - readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, N_total, offset, v, - COMPULSORY); - readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, N_total, offset, mass, - COMPULSORY); - readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, N_total, offset, - h, COMPULSORY); - readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, N_total, offset, - entropy, COMPULSORY); - readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, - id, COMPULSORY); - /* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, - */ - /* OPTIONAL); */ - readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a, - OPTIONAL); - readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho, - OPTIONAL); - - /* Close particle group */ - H5Gclose(h_grp); - - /* Close file */ - H5Fclose(h_file); - } - - /* Wait for the read of the reading to complete */ - MPI_Barrier(comm); - } - - /* message("Done Reading particles..."); */ -} /*----------------------------------------------------------------------------- * Routines writing an output file @@ -409,9 +242,9 @@ void prepareArray(hid_t grp, char* fileName, FILE* xmfFile, char* name, * * Calls #error() if an error occurs. */ -void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, - int dim, long long N_total, long long offset, - char* part_c) { +void writeArrayBackEnd(hid_t grp, char* name, + enum DATA_TYPE type, int N, int dim, long long N_total, + long long offset, char* part_c) { hid_t h_data = 0, h_err = 0, h_memspace = 0, h_filespace = 0; hsize_t shape[2], offsets[2]; void* temp = 0; @@ -478,12 +311,34 @@ void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, H5Sclose(h_filespace); } + +/** + * @brief A helper macro to call the readArrayBackEnd function more easily. + * + * @param grp The group from which to read. + * @param name The name of the array to read. + * @param type The #DATA_TYPE of the attribute. + * @param N The number of particles. 
+ * @param dim The dimension of the data (1 for scalar, 3 for vector) + * @param part The array of particles to fill + * @param N_total Total number of particles + * @param offset Offset in the array where this task starts reading + * @param field The name of the field (C code name as defined in part.h) to fill + * @param importance Is the data compulsory or not + * + */ +#define readArray(grp, name, type, N, dim, part, N_total, offset, \ + field, importance) \ + readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \ + (char*)(&(part[0]).field), importance) + + /** * @brief A helper macro to call the readArrayBackEnd function more easily. * * @param grp The group in which to write. - * @param fileName The name of the file in which the data is written - * @param xmfFile The FILE used to write the XMF description + * @param fileName Unused parameter in non-MPI mode + * @param xmfFile Unused parameter in non-MPI mode * @param name The name of the array to write. * @param type The #DATA_TYPE of the array. * @param N The number of particles to write. @@ -496,9 +351,152 @@ void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, * @param convFactor The UnitConversionFactor for this array * */ -#define writeArray(grp, name, type, N, dim, N_total, offset, part, field) \ - writeArrayBackEnd(grp, name, type, N, dim, N_total, offset, \ - (char*)(&(part[0]).field)) +#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, N_total,\ + mpi_rank, offset, field, us, convFactor) \ + writeArrayBackEnd(grp, name, type, N, dim, \ + N_total, offset, (char*)(&(part[0]).field)) + + +/* Import the right hydro definition */ +#ifdef LEGACY_GADGET2_SPH +#include "./hydro/Gadget2/hydro_io.h" +#else +#include "./hydro/Default/hydro_io.h" +#endif + + +/** + * @brief Reads an HDF5 initial condition file (GADGET-3 type) + * + * @param fileName The file to read. + * @param dim (output) The dimension of the volume read from the file. + * @param parts (output) The array of #part read from the file. + * @param N (output) The number of particles read from the file. + * @param periodic (output) 1 if the volume is periodic, 0 if not. + * + * Opens the HDF5 file fileName and reads the particles contained + * in the parts array. N is the returned number of particles found + * in the file. + * + * @warning Can not read snapshot distributed over more than 1 file !!! + * @todo Read snapshots distributed in more than one file. + * + * Calls #error() if an error occurs. + * + */ +void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N, + int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, + MPI_Info info) { + hid_t h_file = 0, h_grp = 0; + double boxSize[3] = {0.0, -1.0, -1.0}; + /* GADGET has only cubic boxes (in cosmological mode) */ + int numParticles[6] = {0}; + /* GADGET has 6 particle types. 
We only keep the type 0*/ + int numParticles_highWord[6] = {0}; + long long offset = 0; + long long N_total = 0; + int rank; + + /* First read some information about the content */ + if (mpi_rank == 0) { + + /* Open file */ + /* message("Opening file '%s' as IC.", fileName); */ + h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT); + if (h_file < 0) + error("Error while opening file '%s' for initial read.", fileName); + + /* Open header to read simulation properties */ + /* message("Reading runtime parameters..."); */ + h_grp = H5Gopen1(h_file, "/RuntimePars"); + if (h_grp < 0) error("Error while opening runtime parameters\n"); + + /* Read the relevant information */ + readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); + + /* Close runtime parameters */ + H5Gclose(h_grp); + + /* Open header to read simulation properties */ + /* message("Reading file header..."); */ + h_grp = H5Gopen1(h_file, "/Header"); + if (h_grp < 0) error("Error while opening file header\n"); + + /* Read the relevant information and print status */ + readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); + readAttribute(h_grp, "NumPart_Total", UINT, numParticles); + readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord); + + N_total = ((long long)numParticles[0]) + + ((long long)numParticles_highWord[0] << 32); + dim[0] = boxSize[0]; + dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1]; + dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2]; + + /* message("Found %lld particles in a %speriodic box of size [%f %f %f].", + */ + /* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */ + + fflush(stdout); + + /* Close header */ + H5Gclose(h_grp); + + /* Close file */ + H5Fclose(h_file); + } + + /* Now need to broadcast that information to all ranks. */ + MPI_Bcast(periodic, 1, MPI_INT, 0, comm); + MPI_Bcast(&N_total, 1, MPI_LONG_LONG, 0, comm); + MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm); + + /* Divide the particles among the tasks. */ + offset = mpi_rank * N_total / mpi_size; + *N = (mpi_rank + 1) * N_total / mpi_size - offset; + + /* Allocate memory to store particles */ + if (posix_memalign((void*)parts, part_align, (*N) * sizeof(struct part)) != 0) + error("Error while allocating memory for particles"); + bzero(*parts, *N * sizeof(struct part)); + /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */ + /* (1024.*1024.)); */ + + /* Now loop over ranks and read the data */ + for (rank = 0; rank < mpi_size; ++rank) { + + /* Is it this rank's turn to read ? 
*/ + if (rank == mpi_rank) { + + h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT); + if (h_file < 0) + error("Error while opening file '%s' on rank %d.", fileName, mpi_rank); + + /* Open SPH particles group */ + /* message("Reading particle arrays..."); */ + h_grp = H5Gopen1(h_file, "/PartType0"); + if (h_grp < 0) + error("Error while opening particle group on rank %d.\n", mpi_rank); + + /* Read particle fields into the particle structure */ + hydro_read_particles(h_grp, *N, N_total, offset, *parts); + + /* Close particle group */ + H5Gclose(h_grp); + + /* Close file */ + H5Fclose(h_file); + } + + /* Wait for the read of the reading to complete */ + MPI_Barrier(comm); + } + + /* message("Done Reading particles..."); */ +} + + + /** * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor @@ -663,19 +661,8 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank, if (h_grp < 0) error("Error while opening particle group on rank %d.\n", mpi_rank); - /* Write arrays */ - writeArray(h_grp, "Coordinates", DOUBLE, N, 3, N_total, offset, parts, x); - writeArray(h_grp, "Velocities", FLOAT, N, 3, N_total, offset, parts, v); - writeArray(h_grp, "Masses", FLOAT, N, 1, N_total, offset, parts, mass); - writeArray(h_grp, "SmoothingLength", FLOAT, N, 1, N_total, offset, parts, - h); - writeArray(h_grp, "InternalEnergy", FLOAT, N, 1, N_total, offset, parts, - entropy); - writeArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, N_total, offset, parts, - id); - // writeArray(h_grp, "TimeStep", FLOAT, N, 1, N_total, offset, parts, dt); - writeArray(h_grp, "Acceleration", FLOAT, N, 3, N_total, offset, parts, a); - writeArray(h_grp, "Density", FLOAT, N, 1, N_total, offset, parts, rho); + /* Write particle fields from the particle structure */ + hydro_write_particles(h_grp, fileName, xmfFile, N, N_total, 0, offset, parts, us); /* Close particle group */ H5Gclose(h_grp); @@ -692,4 +679,4 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank, ++outputCount; } -#endif /* HAVE_HDF5 */ +#endif /* HAVE_HDF5 && HAVE_MPI */ diff --git a/src/single_io.c b/src/single_io.c index f8f4c9fb4cf329ff34be2d53bad4de2f24de308a..ec4b6ba7edca426651f0f256488209948c3f8b14 100644 --- a/src/single_io.c +++ b/src/single_io.c @@ -241,13 +241,16 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, * @param N The number of particles. * @param dim The dimension of the data (1 for scalar, 3 for vector) * @param part The array of particles to fill + * @param N_total Unused parameter in non-MPI mode + * @param offset Unused parameter in non-MPI mode * @param field The name of the field (C code name as defined in part.h) to fill * @param importance Is the data compulsory or not * */ -#define readArray(grp, name, type, N, dim, part, field, importance) \ - readArrayBackEnd(grp, name, type, N, dim, (char*)(&(part[0]).field), \ - importance) +#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \ + importance) \ + readArrayBackEnd(grp, name, type, N, dim, \ + (char*)(&(part[0]).field), importance) /** @@ -261,16 +264,18 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, * @param N The number of particles to write. 
* @param dim The dimension of the data (1 for scalar, 3 for vector) * @param part A (char*) pointer on the first occurrence of the field of - *interest - *in the parts array + * interest in the parts array + * @param N_total Unused parameter in non-MPI mode + * @param mpi_rank Unused parameter in non-MPI mode + * @param offset Unused parameter in non-MPI mode * @param field The name (code name) of the field to read from. * @param us The UnitSystem currently in use * @param convFactor The UnitConversionFactor for this array * */ -#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, field, \ - us, convFactor) \ - writeArrayBackEnd(grp, fileName, xmfFile, name, type, N, dim, \ +#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, N_total, \ + mpi_rank, offset, field, us, convFactor) \ + writeArrayBackEnd(grp, fileName, xmfFile, name, type, N, dim, \ (char*)(&(part[0]).field), us, convFactor) @@ -362,7 +367,7 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts, int* N, if (h_grp < 0) error("Error while opening particle group.\n"); /* Read particle fields into the particle structure */ - hydro_read_particles(h_grp, *N, *parts); + hydro_read_particles(h_grp, *N, *N, 0, *parts); /* Close particle group */ H5Gclose(h_grp); @@ -470,7 +475,7 @@ void write_output_single(struct engine* e, struct UnitSystem* us) { if (h_grp < 0) error("Error while creating particle group.\n"); /* Write particle fields from the particle structure */ - hydro_write_particles(h_grp, fileName, xmfFile, N, parts, us); + hydro_write_particles(h_grp, fileName, xmfFile, N, N, 0, 0, parts, us); /* Close particle group */ H5Gclose(h_grp);