Commit 24c0a7c0 authored by Matthieu Schaller

Unified the i/o system for all SPH modes and all i/o modes (parallel, serial, single)

parent 7d6760b5
2 merge requests: !136 Master, !85 Tiny fixes to correctly accommodate the switch between SPH variations
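All the hunks below follow one pattern: every SPH flavour provides its own hydro_read_particles() / hydro_write_particles() pair in a hydro_io.h header, and the parallel, serial and single back-ends include the matching header (selected on LEGACY_GADGET2_SPH) and call those two functions instead of listing the per-field readArray/writeArray calls themselves. A compile-and-run toy of that compile-time dispatch — not SWIFT code; the struct and the function bodies are stand-ins:

#include <stdio.h>

struct part { double x[3]; float u; float entropy; };

#ifdef LEGACY_GADGET2_SPH
/* Gadget2-style flavour: the thermal variable is the entropy. */
static void hydro_read_particles(struct part* parts, int N) {
  for (int i = 0; i < N; ++i) parts[i].entropy = 1.f;
}
#else
/* Default flavour: the thermal variable is the internal energy u. */
static void hydro_read_particles(struct part* parts, int N) {
  for (int i = 0; i < N; ++i) parts[i].u = 1.f;
}
#endif

int main(void) {
  struct part p[4];
  hydro_read_particles(p, 4); /* same call whatever the flavour */
  printf("read %d particles\n", 4);
  return 0;
}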
@@ -17,48 +17,80 @@
 *
 ******************************************************************************/
+/**
+ * @brief Reads the different particles from the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to read the arrays.
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ *
+ */
 __attribute__((always_inline))
-INLINE static void hydro_read_particles(hid_t h_grp, int N, struct part* parts) {
+INLINE static void hydro_read_particles(hid_t h_grp, int N, long long N_total,
+                                        long long offset, struct part* parts) {
   /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, x, COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, v, COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, mass, COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, h, COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, u, COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, id, COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, a, OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, rho, OPTIONAL);
+  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total,
+            offset, x, COMPULSORY);
+  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total,
+            offset, v, COMPULSORY);
+  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total,
+            offset, mass, COMPULSORY);
+  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total,
+            offset, h, COMPULSORY);
+  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total,
+            offset, u, COMPULSORY);
+  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total,
+            offset, id, COMPULSORY);
+  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total,
+            offset, a, OPTIONAL);
+  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total,
+            offset, rho, OPTIONAL);
 }
+/**
+ * @brief Writes the different particles to the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to write the arrays.
+ * @param fileName The name of the file (unused in MPI mode).
+ * @param xmfFile The XMF file to write to (unused in MPI mode).
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param mpi_rank The MPI rank of this node (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ * @param us The unit system to use
+ *
+ */
 __attribute__((always_inline))
 INLINE static void hydro_write_particles(hid_t h_grp, char* fileName, FILE* xmfFile,
-                                         int N, struct part* parts,
+                                         int N, long long N_total, int mpi_rank,
+                                         long long offset, struct part* parts,
                                          struct UnitSystem* us) {
   /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, x,
-             us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, v, us,
-             UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, mass, us,
-             UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, h,
-             us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
-             u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
-             id, us, UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, a,
-             us, UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, rho, us,
-             UNIT_CONV_DENSITY);
+  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
+             N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
+  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
+  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
+  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
+             N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
+  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
+  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
 }
@@ -17,48 +17,78 @@
 *
 ******************************************************************************/
+/**
+ * @brief Reads the different particles from the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to read the arrays.
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ *
+ */
 __attribute__((always_inline))
-INLINE static void hydro_read_particles(hid_t h_grp, int N, struct part* parts) {
+INLINE static void hydro_read_particles(hid_t h_grp, int N, long long N_total,
+                                        long long offset, struct part* parts) {
   /* Read arrays */
-  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, x, COMPULSORY);
-  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, v, COMPULSORY);
-  readArray(h_grp, "Masses", FLOAT, N, 1, parts, mass, COMPULSORY);
-  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, h, COMPULSORY);
-  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, entropy, COMPULSORY);
-  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, id, COMPULSORY);
-  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, a, OPTIONAL);
-  readArray(h_grp, "Density", FLOAT, N, 1, parts, rho, OPTIONAL);
+  readArray(h_grp, "Coordinates", DOUBLE, N, 3, parts, N_total,
+            offset, x, COMPULSORY);
+  readArray(h_grp, "Velocities", FLOAT, N, 3, parts, N_total,
+            offset, v, COMPULSORY);
+  readArray(h_grp, "Masses", FLOAT, N, 1, parts, N_total,
+            offset, mass, COMPULSORY);
+  readArray(h_grp, "SmoothingLength", FLOAT, N, 1, parts, N_total,
+            offset, h, COMPULSORY);
+  readArray(h_grp, "InternalEnergy", FLOAT, N, 1, parts, N_total,
+            offset, entropy, COMPULSORY);
+  readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, parts, N_total,
+            offset, id, COMPULSORY);
+  readArray(h_grp, "Acceleration", FLOAT, N, 3, parts, N_total,
+            offset, a, OPTIONAL);
+  readArray(h_grp, "Density", FLOAT, N, 1, parts, N_total,
+            offset, rho, OPTIONAL);
 }
+/**
+ * @brief Writes the different particles to the HDF5 file
+ *
+ * @param h_grp The HDF5 group in which to write the arrays.
+ * @param fileName The name of the file (unused in MPI mode).
+ * @param xmfFile The XMF file to write to (unused in MPI mode).
+ * @param N The number of particles on that MPI rank.
+ * @param N_total The total number of particles (only used in MPI mode)
+ * @param mpi_rank The MPI rank of this node (only used in MPI mode)
+ * @param offset The offset of the particles for this MPI rank (only used in MPI mode)
+ * @param parts The particle array
+ * @param us The unit system to use
+ *
+ */
 __attribute__((always_inline))
 INLINE static void hydro_write_particles(hid_t h_grp, char* fileName, FILE* xmfFile,
-                                         int N, struct part* parts,
+                                         int N, long long N_total, int mpi_rank,
+                                         long long offset, struct part* parts,
                                          struct UnitSystem* us) {
   /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, x,
-             us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, v, us,
-             UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, mass, us,
-             UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, h,
-             us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
-             entropy, us, UNIT_CONV_ENTROPY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
-             id, us, UNIT_CONV_NO_UNITS);
-  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, a,
-             us, UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, rho, us,
-             UNIT_CONV_DENSITY);
+  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
+             N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
+  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, mass, us, UNIT_CONV_MASS);
+  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH);
+  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, entropy, us, UNIT_CONV_ENTROPY_PER_UNIT_MASS);
+  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
+             N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
+  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
+             N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
+  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts,
+             N_total, mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
 }
@@ -151,147 +151,6 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  H5Dclose(h_data);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles on this MPI task.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param N_total Total number of particles
* @param offset Offset in the array where this task starts reading
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
importance) \
readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \
(char*)(&(part[0]).field), importance)
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
* in the file.
*
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
int* N, int* periodic, int mpi_rank, int mpi_size,
MPI_Comm comm, MPI_Info info) {
hid_t h_file = 0, h_grp = 0;
double boxSize[3] = {
0.0, -1.0, -1.0}; /* GADGET has only cubic boxes (in cosmological mode) */
int numParticles[6] = {
0}; /* GADGET has 6 particle types. We only keep the type 0*/
int numParticles_highWord[6] = {0};
long long offset = 0;
long long N_total = 0;
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(h_plist_id, comm, info);
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
if (h_file < 0) {
error("Error while opening file '%s'.", fileName);
}
/* Open header to read simulation properties */
/* message("Reading runtime parameters..."); */
h_grp = H5Gopen1(h_file, "/RuntimePars");
if (h_grp < 0) error("Error while opening runtime parameters\n");
/* Read the relevant information */
readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
/* Close runtime parameters */
H5Gclose(h_grp);
/* Open header to read simulation properties */
/* message("Reading file header..."); */
h_grp = H5Gopen1(h_file, "/Header");
if (h_grp < 0) error("Error while opening file header\n");
/* Read the relevant information and print status */
readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord);
N_total = ((long long)numParticles[0]) +
((long long)numParticles_highWord[0] << 32);
dim[0] = boxSize[0];
dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];
/* message("Found %d particles in a %speriodic box of size [%f %f %f].", */
/* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */
/* Divide the particles among the tasks. */
offset = mpi_rank * N_total / mpi_size;
*N = (mpi_rank + 1) * N_total / mpi_size - offset;
/* Close header */
H5Gclose(h_grp);
/* Allocate memory to store particles */
if (posix_memalign((void*)parts, part_align, *N * sizeof(struct part)) != 0)
error("Error while allocating memory for particles");
bzero(*parts, *N * sizeof(struct part));
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) /
* (1024.*1024.)); */
/* Open SPH particles group */
/* message("Reading particle arrays..."); */
h_grp = H5Gopen1(h_file, "/PartType0");
if (h_grp < 0) error("Error while opening particle group.\n");
/* Read arrays */
readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset, x,
COMPULSORY);
readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, N_total, offset, v,
COMPULSORY);
readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, N_total, offset, mass,
COMPULSORY);
readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, N_total, offset, h,
COMPULSORY);
readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, N_total, offset,
entropy, COMPULSORY);
readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id,
COMPULSORY);
/* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt, */
/* OPTIONAL); */
readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
OPTIONAL);
readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
OPTIONAL);
/* Close particle group */
H5Gclose(h_grp);
/* Close property handler */
H5Pclose(h_plist_id);
/* Close file */
H5Fclose(h_file);
/* message("Done Reading particles..."); */
}
/*-----------------------------------------------------------------------------
 * Routines writing an output file
@@ -428,6 +287,29 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name,
  H5Sclose(h_filespace);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles on this MPI task.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param N_total Total number of particles
* @param offset Offset in the array where this task starts reading
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
importance) \
readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \
(char*)(&(part[0]).field), importance)
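The readArray/writeArray macros hand the back-end a char* to the requested field of the first particle, so the back-end can scatter or gather values by striding through the array of structs in steps of sizeof(struct part). A self-contained sketch of that idiom — toy struct and helper, not the actual readArrayBackEnd:

#include <stdio.h>
#include <string.h>

struct part { double x[3]; float mass; long long id; };

/* Toy back-end: scatter N values of size copySize from a contiguous
 * buffer into the particle array, starting at the field pointed to by
 * part_c and striding by sizeof(struct part). */
static void scatter_field(char* part_c, const void* buffer, int N,
                          size_t copySize) {
  for (int i = 0; i < N; ++i)
    memcpy(part_c + i * sizeof(struct part),
           (const char*)buffer + i * copySize, copySize);
}

int main(void) {
  struct part parts[3];
  float masses[3] = {1.f, 2.f, 3.f};
  /* Same trick as the macro: (char*)(&(parts[0]).mass) names the field. */
  scatter_field((char*)(&(parts[0]).mass), masses, 3, sizeof(float));
  printf("%g %g %g\n", parts[0].mass, parts[1].mass, parts[2].mass);
  return 0;
}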
/**
 * @brief A helper macro to call the writeArrayBackEnd function more easily.
 *
@@ -455,6 +337,121 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name,
                    mpi_rank, offset, (char*)(&(part[0]).field), us, \
                    convFactor)
/* Import the right hydro definition */
#ifdef LEGACY_GADGET2_SPH
#include "./hydro/Gadget2/hydro_io.h"
#else
#include "./hydro/Default/hydro_io.h"
#endif
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
* in the file.
*
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
int* N, int* periodic, int mpi_rank, int mpi_size,
MPI_Comm comm, MPI_Info info) {
hid_t h_file = 0, h_grp = 0;
double boxSize[3] = {
0.0, -1.0, -1.0}; /* GADGET has only cubic boxes (in cosmological mode) */
int numParticles[6] = {
0}; /* GADGET has 6 particle types. We only keep the type 0*/
int numParticles_highWord[6] = {0};
long long offset = 0;
long long N_total = 0;
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(h_plist_id, comm, info);
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
if (h_file < 0) {
error("Error while opening file '%s'.", fileName);
}
/* Open header to read simulation properties */
/* message("Reading runtime parameters..."); */
h_grp = H5Gopen1(h_file, "/RuntimePars");
if (h_grp < 0) error("Error while opening runtime parameters\n");
/* Read the relevant information */
readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
/* Close runtime parameters */
H5Gclose(h_grp);
/* Open header to read simulation properties */
/* message("Reading file header..."); */
h_grp = H5Gopen1(h_file, "/Header");
if (h_grp < 0) error("Error while opening file header\n");
/* Read the relevant information and print status */
readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord);
N_total = ((long long)numParticles[0]) +
((long long)numParticles_highWord[0] << 32);
dim[0] = boxSize[0];
dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];
/* message("Found %d particles in a %speriodic box of size [%f %f %f].", */
/* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */
/* Divide the particles among the tasks. */
offset = mpi_rank * N_total / mpi_size;
*N = (mpi_rank + 1) * N_total / mpi_size - offset;
/* Close header */
H5Gclose(h_grp);
/* Allocate memory to store particles */
if (posix_memalign((void*)parts, part_align, *N * sizeof(struct part)) != 0)
error("Error while allocating memory for particles");
bzero(*parts, *N * sizeof(struct part));
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) /
* (1024.*1024.)); */
/* Open SPH particles group */
/* message("Reading particle arrays..."); */
h_grp = H5Gopen1(h_file, "/PartType0");
if (h_grp < 0) error("Error while opening particle group.\n");
/* Read particle fields into the particle structure */
hydro_read_particles(h_grp, *N, N_total, offset, *parts);
/* Close particle group */
H5Gclose(h_grp);
/* Close property handler */
H5Pclose(h_plist_id);
/* Close file */
H5Fclose(h_file);
/* message("Done Reading particles..."); */
}
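read_ic_parallel splits the global particle count with offset = mpi_rank * N_total / mpi_size and *N = (mpi_rank + 1) * N_total / mpi_size - offset; the integer division guarantees the per-rank counts sum exactly to N_total even when it is not divisible by mpi_size. A standalone check of that arithmetic (example values assumed, no MPI needed):

#include <stdio.h>

int main(void) {
  const long long N_total = 10; /* example value */
  const int mpi_size = 3;       /* example value */
  for (int mpi_rank = 0; mpi_rank < mpi_size; ++mpi_rank) {
    long long offset = mpi_rank * N_total / mpi_size;
    long long N = (mpi_rank + 1) * N_total / mpi_size - offset;
    printf("rank %d: offset=%lld N=%lld\n", mpi_rank, offset, N);
  }
  /* Prints offset=0 N=3, offset=3 N=3, offset=6 N=4: the counts sum to 10. */
  return 0;
}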
/**
 * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
 *
@@ -570,27 +567,8 @@ void write_output_parallel(struct engine* e, struct UnitSystem* us,
  h_grp = H5Gcreate1(h_file, "/PartType0", 0);
  if (h_grp < 0) error("Error while creating particle group.\n");
-  /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
-             N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts,
-             N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, N_total,
-             mpi_rank, offset, mass, us, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts,
-             N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
-             N_total, mpi_rank, offset, entropy, us,
-             UNIT_CONV_ENERGY_PER_UNIT_MASS);
-  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
-             N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
-  /* writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts,
-   *            N_total, mpi_rank, offset, dt, us, UNIT_CONV_TIME); */
-  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
-             N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
-  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total,
-             mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);
+  /* Write particle fields from the particle structure */
+  hydro_write_particles(h_grp, fileName, xmfFile, N, N_total, mpi_rank, offset,
+                        parts, us);
  /* Close particle group */
  H5Gclose(h_grp);
......
@@ -168,173 +168,6 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  H5Dclose(h_data);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param N_total Total number of particles
* @param offset Offset in the array where this task starts reading
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
importance) \
readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \
(char*)(&(part[0]).field), importance)
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type)
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
* in the file.
*
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info) {
hid_t h_file = 0, h_grp = 0;
double boxSize[3] = {0.0, -1.0, -1.0};
/* GADGET has only cubic boxes (in cosmological mode) */
int numParticles[6] = {0};
/* GADGET has 6 particle types. We only keep the type 0*/
int numParticles_highWord[6] = {0};
long long offset = 0;
long long N_total = 0;
int rank;
/* First read some information about the content */
if (mpi_rank == 0) {
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
if (h_file < 0)
error("Error while opening file '%s' for initial read.", fileName);
/* Open header to read simulation properties */
/* message("Reading runtime parameters..."); */
h_grp = H5Gopen1(h_file, "/RuntimePars");
if (h_grp < 0) error("Error while opening runtime parameters\n");
/* Read the relevant information */
readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
/* Close runtime parameters */
H5Gclose(h_grp);
/* Open header to read simulation properties */
/* message("Reading file header..."); */
h_grp = H5Gopen1(h_file, "/Header");
if (h_grp < 0) error("Error while opening file header\n");
/* Read the relevant information and print status */
readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord);
N_total = ((long long)numParticles[0]) +
((long long)numParticles_highWord[0] << 32);
dim[0] = boxSize[0];
dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];
/* message("Found %lld particles in a %speriodic box of size [%f %f %f].",
*/
/* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */
fflush(stdout);
/* Close header */
H5Gclose(h_grp);
/* Close file */
H5Fclose(h_file);
}
/* Now need to broadcast that information to all ranks. */
MPI_Bcast(periodic, 1, MPI_INT, 0, comm);
MPI_Bcast(&N_total, 1, MPI_LONG_LONG, 0, comm);
MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm);
/* Divide the particles among the tasks. */
offset = mpi_rank * N_total / mpi_size;
*N = (mpi_rank + 1) * N_total / mpi_size - offset;
/* Allocate memory to store particles */
if (posix_memalign((void*)parts, part_align, (*N) * sizeof(struct part)) != 0)
error("Error while allocating memory for particles");
bzero(*parts, *N * sizeof(struct part));
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */
/* (1024.*1024.)); */
/* Now loop over ranks and read the data */
for (rank = 0; rank < mpi_size; ++rank) {
/* Is it this rank's turn to read ? */
if (rank == mpi_rank) {
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
if (h_file < 0)
error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);
/* Open SPH particles group */
/* message("Reading particle arrays..."); */
h_grp = H5Gopen1(h_file, "/PartType0");
if (h_grp < 0)
error("Error while opening particle group on rank %d.\n", mpi_rank);
/* Read arrays */
readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset, x,
COMPULSORY);
readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, N_total, offset, v,
COMPULSORY);
readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, N_total, offset, mass,
COMPULSORY);
readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, N_total, offset,
h, COMPULSORY);
readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, N_total, offset,
entropy, COMPULSORY);
readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset,
id, COMPULSORY);
/* readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt,
*/
/* OPTIONAL); */
readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
OPTIONAL);
readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
OPTIONAL);
/* Close particle group */
H5Gclose(h_grp);
/* Close file */
H5Fclose(h_file);
}
/* Wait for the read of the reading to complete */
MPI_Barrier(comm);
}
/* message("Done Reading particles..."); */
}
/*-----------------------------------------------------------------------------
 * Routines writing an output file
@@ -409,9 +242,9 @@ void prepareArray(hid_t grp, char* fileName, FILE* xmfFile, char* name,
 *
 * Calls #error() if an error occurs.
 */
-void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
-                       int dim, long long N_total, long long offset,
-                       char* part_c) {
+void writeArrayBackEnd(hid_t grp, char* name,
+                       enum DATA_TYPE type, int N, int dim, long long N_total,
+                       long long offset, char* part_c) {
  hid_t h_data = 0, h_err = 0, h_memspace = 0, h_filespace = 0;
  hsize_t shape[2], offsets[2];
  void* temp = 0;
@@ -478,12 +311,34 @@ void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
  H5Sclose(h_filespace);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param N_total Total number of particles
* @param offset Offset in the array where this task starts reading
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, N_total, offset, \
field, importance) \
readArrayBackEnd(grp, name, type, N, dim, N_total, offset, \
(char*)(&(part[0]).field), importance)
/**
 * @brief A helper macro to call the writeArrayBackEnd function more easily.
 *
 * @param grp The group in which to write.
- * @param fileName The name of the file in which the data is written
- * @param xmfFile The FILE used to write the XMF description
+ * @param fileName Unused parameter in non-MPI mode
+ * @param xmfFile Unused parameter in non-MPI mode
 * @param name The name of the array to write.
 * @param type The #DATA_TYPE of the array.
 * @param N The number of particles to write.
@@ -496,9 +351,152 @@ void writeArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
 * @param convFactor The UnitConversionFactor for this array
 *
 */
-#define writeArray(grp, name, type, N, dim, N_total, offset, part, field)    \
-  writeArrayBackEnd(grp, name, type, N, dim, N_total, offset,                \
-                    (char*)(&(part[0]).field))
+#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, N_total,\
+                   mpi_rank, offset, field, us, convFactor)                  \
+  writeArrayBackEnd(grp, name, type, N, dim,                                 \
+                    N_total, offset, (char*)(&(part[0]).field))
/* Import the right hydro definition */
#ifdef LEGACY_GADGET2_SPH
#include "./hydro/Gadget2/hydro_io.h"
#else
#include "./hydro/Default/hydro_io.h"
#endif
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type)
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
* in the file.
*
* @warning Can not read snapshot distributed over more than 1 file !!!
* @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_serial(char* fileName, double dim[3], struct part** parts, int* N,
int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm,
MPI_Info info) {
hid_t h_file = 0, h_grp = 0;
double boxSize[3] = {0.0, -1.0, -1.0};
/* GADGET has only cubic boxes (in cosmological mode) */
int numParticles[6] = {0};
/* GADGET has 6 particle types. We only keep the type 0*/
int numParticles_highWord[6] = {0};
long long offset = 0;
long long N_total = 0;
int rank;
/* First read some information about the content */
if (mpi_rank == 0) {
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
if (h_file < 0)
error("Error while opening file '%s' for initial read.", fileName);
/* Open header to read simulation properties */
/* message("Reading runtime parameters..."); */
h_grp = H5Gopen1(h_file, "/RuntimePars");
if (h_grp < 0) error("Error while opening runtime parameters\n");
/* Read the relevant information */
readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
/* Close runtime parameters */
H5Gclose(h_grp);
/* Open header to read simulation properties */
/* message("Reading file header..."); */
h_grp = H5Gopen1(h_file, "/Header");
if (h_grp < 0) error("Error while opening file header\n");
/* Read the relevant information and print status */
readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord);
N_total = ((long long)numParticles[0]) +
((long long)numParticles_highWord[0] << 32);
dim[0] = boxSize[0];
dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];
/* message("Found %lld particles in a %speriodic box of size [%f %f %f].",
*/
/* N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */
fflush(stdout);
/* Close header */
H5Gclose(h_grp);
/* Close file */
H5Fclose(h_file);
}
/* Now need to broadcast that information to all ranks. */
MPI_Bcast(periodic, 1, MPI_INT, 0, comm);
MPI_Bcast(&N_total, 1, MPI_LONG_LONG, 0, comm);
MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm);
/* Divide the particles among the tasks. */
offset = mpi_rank * N_total / mpi_size;
*N = (mpi_rank + 1) * N_total / mpi_size - offset;
/* Allocate memory to store particles */
if (posix_memalign((void*)parts, part_align, (*N) * sizeof(struct part)) != 0)
error("Error while allocating memory for particles");
bzero(*parts, *N * sizeof(struct part));
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */
/* (1024.*1024.)); */
/* Now loop over ranks and read the data */
for (rank = 0; rank < mpi_size; ++rank) {
/* Is it this rank's turn to read ? */
if (rank == mpi_rank) {
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
if (h_file < 0)
error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);
/* Open SPH particles group */
/* message("Reading particle arrays..."); */
h_grp = H5Gopen1(h_file, "/PartType0");
if (h_grp < 0)
error("Error while opening particle group on rank %d.\n", mpi_rank);
/* Read particle fields into the particle structure */
hydro_read_particles(h_grp, *N, N_total, offset, *parts);
/* Close particle group */
H5Gclose(h_grp);
/* Close file */
H5Fclose(h_file);
}
/* Wait for the reading of this rank to complete */
MPI_Barrier(comm);
}
/* message("Done Reading particles..."); */
}
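The rank loop above serialises file access: only the rank whose turn it is opens the HDF5 file, and every rank synchronises at the barrier before the next one goes. A minimal MPI-only illustration of the same turn-taking pattern (the file read is replaced by a print; not SWIFT code):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int mpi_rank, mpi_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

  for (int rank = 0; rank < mpi_size; ++rank) {
    if (rank == mpi_rank) {
      /* Only one rank touches the file during this iteration. */
      printf("rank %d reading its slice\n", mpi_rank);
    }
    /* Everyone waits before the next rank takes its turn. */
    MPI_Barrier(MPI_COMM_WORLD);
  }

  MPI_Finalize();
  return 0;
}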
/**
 * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
@@ -663,19 +661,8 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank,
      if (h_grp < 0)
        error("Error while opening particle group on rank %d.\n", mpi_rank);
-      /* Write arrays */
-      writeArray(h_grp, "Coordinates", DOUBLE, N, 3, N_total, offset, parts, x);
-      writeArray(h_grp, "Velocities", FLOAT, N, 3, N_total, offset, parts, v);
-      writeArray(h_grp, "Masses", FLOAT, N, 1, N_total, offset, parts, mass);
-      writeArray(h_grp, "SmoothingLength", FLOAT, N, 1, N_total, offset, parts,
-                 h);
-      writeArray(h_grp, "InternalEnergy", FLOAT, N, 1, N_total, offset, parts,
-                 entropy);
-      writeArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, N_total, offset, parts,
-                 id);
-      // writeArray(h_grp, "TimeStep", FLOAT, N, 1, N_total, offset, parts, dt);
-      writeArray(h_grp, "Acceleration", FLOAT, N, 3, N_total, offset, parts, a);
-      writeArray(h_grp, "Density", FLOAT, N, 1, N_total, offset, parts, rho);
+      /* Write particle fields from the particle structure */
+      hydro_write_particles(h_grp, fileName, xmfFile, N, N_total, 0, offset,
+                            parts, us);
      /* Close particle group */
      H5Gclose(h_grp);
@@ -692,4 +679,4 @@ void write_output_serial(struct engine* e, struct UnitSystem* us, int mpi_rank,
  ++outputCount;
}
-#endif /* HAVE_HDF5 */
+#endif /* HAVE_HDF5 && HAVE_MPI */
@@ -241,13 +241,16 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name,
 * @param N The number of particles.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part The array of particles to fill
+ * @param N_total Unused parameter in non-MPI mode
+ * @param offset Unused parameter in non-MPI mode
 * @param field The name of the field (C code name as defined in part.h) to fill
 * @param importance Is the data compulsory or not
 *
 */
-#define readArray(grp, name, type, N, dim, part, field, importance)        \
-  readArrayBackEnd(grp, name, type, N, dim, (char*)(&(part[0]).field),     \
-                   importance)
+#define readArray(grp, name, type, N, dim, part, N_total, offset, field,   \
+                  importance)                                              \
+  readArrayBackEnd(grp, name, type, N, dim,                                \
+                   (char*)(&(part[0]).field), importance)
/**
@@ -261,16 +264,18 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name,
 * @param N The number of particles to write.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part A (char*) pointer on the first occurrence of the field of
- *interest
- *in the parts array
+ * interest in the parts array
+ * @param N_total Unused parameter in non-MPI mode
+ * @param mpi_rank Unused parameter in non-MPI mode
+ * @param offset Unused parameter in non-MPI mode
 * @param field The name (code name) of the field to read from.
 * @param us The UnitSystem currently in use
 * @param convFactor The UnitConversionFactor for this array
 *
 */
-#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, field,   \
-                   us, convFactor)                                            \
+#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, N_total, \
+                   mpi_rank, offset, field, us, convFactor)                   \
  writeArrayBackEnd(grp, fileName, xmfFile, name, type, N, dim,                \
                    (char*)(&(part[0]).field), us, convFactor)
@@ -362,7 +367,7 @@ void read_ic_single(char* fileName, double dim[3], struct part** parts, int* N,
  if (h_grp < 0) error("Error while opening particle group.\n");
  /* Read particle fields into the particle structure */
-  hydro_read_particles(h_grp, *N, *parts);
+  hydro_read_particles(h_grp, *N, *N, 0, *parts);
  /* Close particle group */
  H5Gclose(h_grp);
@@ -470,7 +475,7 @@ void write_output_single(struct engine* e, struct UnitSystem* us) {
  if (h_grp < 0) error("Error while creating particle group.\n");
  /* Write particle fields from the particle structure */
-  hydro_write_particles(h_grp, fileName, xmfFile, N, parts, us);
+  hydro_write_particles(h_grp, fileName, xmfFile, N, N, 0, 0, parts, us);
  /* Close particle group */
  H5Gclose(h_grp);
......