diff --git a/src/common_io.c b/src/common_io.c
index b564d8a15c80ad551bfcfe5d38e4546b9ea8dc1f..91c99095e0efbed06d8ad5df0fd6dc510af9f283 100644
--- a/src/common_io.c
+++ b/src/common_io.c
@@ -543,8 +543,9 @@ void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype) {
  *
  * @todo Treat the types in a better way.
  */
-void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
-                  char* name, size_t N, int dim, enum DATA_TYPE type) {
+void writeXMFline(FILE* xmfFile, const char* fileName,
+                  const char* partTypeGroupName, const char* name, size_t N,
+                  int dim, enum DATA_TYPE type) {
   fprintf(xmfFile,
           "<Attribute Name=\"%s\" AttributeType=\"%s\" Center=\"Node\">\n",
           name, dim == 1 ? "Scalar" : "Vector");
diff --git a/src/common_io.h b/src/common_io.h
index 30e93d2d2dae05628f6a6d277ab5d511497b5879..52629172d890433652400778c491f186ac918820 100644
--- a/src/common_io.h
+++ b/src/common_io.h
@@ -94,8 +94,9 @@ void writeXMFoutputfooter(FILE* xmfFile, int outputCount, float time);
 void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N,
                          enum PARTICLE_TYPE ptype);
 void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype);
-void writeXMFline(FILE* xmfFile, char* fileName, char* partTypeGroupName,
-                  char* name, size_t N, int dim, enum DATA_TYPE type);
+void writeXMFline(FILE* xmfFile, const char* fileName,
+                  const char* partTypeGroupName, const char* name, size_t N,
+                  int dim, enum DATA_TYPE type);
 
 void writeCodeDescription(hid_t h_file);
 
diff --git a/src/gravity/Default/gravity_io.h b/src/gravity/Default/gravity_io.h
index 724f35346f07879dac6987c8ea99048b27a4ff2a..c96c800becdb6dfca29265f912a0dc0406c4819f 100644
--- a/src/gravity/Default/gravity_io.h
+++ b/src/gravity/Default/gravity_io.h
@@ -17,6 +17,8 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+
 /**
  * @brief Read dark matter particles from HDF5.
  *
@@ -45,24 +47,6 @@ void darkmatter_read_particles(struct gpart* gparts, struct io_props* list,
                                 gparts, mass);
   list[3] = io_make_input_field("ParticleIDs", ULONGLONG, 1, COMPULSORY,
                                 UNIT_CONV_NO_UNITS, gparts, id_or_neg_offset);
-
-  /* Read arrays */
-  /* readArray(h_grp, "Coordinates", DOUBLE, N, 3, gparts, N_total, offset, x,
-   */
-  /*           COMPULSORY, internal_units, ic_units, UNIT_CONV_LENGTH); */
-  /* readArray(h_grp, "Masses", FLOAT, N, 1, gparts, N_total, offset, mass, */
-  /*           COMPULSORY, internal_units, ic_units, UNIT_CONV_MASS); */
-  /* readArray(h_grp, "Velocities", FLOAT, N, 3, gparts, N_total, offset,
-   * v_full, */
-  /*           COMPULSORY, internal_units, ic_units, UNIT_CONV_SPEED); */
-  /* readArray(h_grp, "ParticleIDs", ULONGLONG, N, 1, gparts, N_total, offset,
-   * id, */
-  /*           COMPULSORY, internal_units, ic_units, UNIT_CONV_NO_UNITS); */
-
-  /* And read everything */
-  /* for (int i = 0; i < num_fields; ++i) */
-  /*   readArray(h_grp, list[i], N, N_total, offset, internal_units, ic_units);
-   */
 }
 
 /**
@@ -90,17 +74,21 @@ __attribute__((always_inline)) INLINE static void darkmatter_write_particles(
     const struct UnitSystem* snapshot_units) {
 
   /* Write arrays */
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates", DOUBLE,
-             Ndm, 3, gparts, Ndm_total, mpi_rank, offset, x, internal_units,
-             snapshot_units, UNIT_CONV_LENGTH);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT, Ndm,
-             1, gparts, Ndm_total, mpi_rank, offset, mass, internal_units,
-             snapshot_units, UNIT_CONV_MASS);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities", FLOAT,
-             Ndm, 3, gparts, Ndm_total, mpi_rank, offset, v_full,
-             internal_units, snapshot_units, UNIT_CONV_SPEED);
-  writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs",
-             ULONGLONG, Ndm, 1, gparts, Ndm_total, mpi_rank, offset,
-             id_or_neg_offset, internal_units, snapshot_units,
-             UNIT_CONV_NO_UNITS);
+  /* writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates",
+   * DOUBLE, */
+  /*            Ndm, 3, gparts, Ndm_total, mpi_rank, offset, x, internal_units,
+   */
+  /*            snapshot_units, UNIT_CONV_LENGTH); */
+  /* writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Masses", FLOAT,
+   * Ndm, */
+  /*            1, gparts, Ndm_total, mpi_rank, offset, mass, internal_units, */
+  /*            snapshot_units, UNIT_CONV_MASS); */
+  /* writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Velocities",
+   * FLOAT, */
+  /*            Ndm, 3, gparts, Ndm_total, mpi_rank, offset, v_full, */
+  /*            internal_units, snapshot_units, UNIT_CONV_SPEED); */
+  /* writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "ParticleIDs", */
+  /*            ULONGLONG, Ndm, 1, gparts, Ndm_total, mpi_rank, offset, */
+  /*            id_or_neg_offset, internal_units, snapshot_units, */
+  /*            UNIT_CONV_NO_UNITS); */
 }
diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h
index 3b79378b9a3ee6032c3f909b813191b17d1c2afa..a0be479945b57fb241ddcb16dc9adf5c391c6bae 100644
--- a/src/hydro/Gadget2/hydro_io.h
+++ b/src/hydro/Gadget2/hydro_io.h
@@ -17,18 +17,15 @@
  *
  ******************************************************************************/
 
+#include "io_properties.h"
+#include "kernel_hydro.h"
+
 /**
- * @brief Reads the different particles to the HDF5 file
- *
- * @param h_grp The HDF5 group in which to read the arrays.
- * @param N The number of particles on that MPI rank.
- * @param N_total The total number of particles (only used in MPI mode)
- * @param offset The offset of the particles for this MPI rank (only used in MPI
- * mode)
- * @param parts The particle array
- * @param internal_units The #UnitSystem used internally
- * @param ic_units The #UnitSystem used in the snapshots
+ * @brief Specifies which particle fields to read from a dataset
  *
+ * @param parts The particle array.
+ * @param list The list of i/o properties to read.
+ * @param num_fields The number of i/o fields to read.
  */
 void hydro_read_particles(struct part* parts, struct io_props* list,
                           int* num_fields) {
@@ -72,11 +69,14 @@
  * @param snapshot_units The #UnitSystem used in the snapshots
  *
  */
-__attribute__((always_inline)) INLINE static void hydro_write_particles(
-    hid_t h_grp, char* fileName, char* partTypeGroupName, FILE* xmfFile, int N,
-    long long N_total, int mpi_rank, long long offset, struct part* parts,
-    const struct UnitSystem* internal_units,
-    const struct UnitSystem* snapshot_units) {
+void hydro_write_particles(struct part* parts, struct io_props* list,
+                           int* num_fields) {
+
+  *num_fields = 1;
+
+  /* List what we want to read */
+  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
+                                 parts, x);
 
   /* Write arrays */
   /* writeArray(h_grp, fileName, xmfFile, partTypeGroupName, "Coordinates",
diff --git a/src/io_properties.h b/src/io_properties.h
index aefac71843e594c46421b0a0cbec41acc5bb047e..5e19420d006ee8043ac418045b6889d885fbd66a 100644
--- a/src/io_properties.h
+++ b/src/io_properties.h
@@ -63,7 +63,6 @@ struct io_props {
   io_make_input_field_(name, type, dim, importance, units,                    \
                        (char*)(&(part[0]).field), sizeof(part[0]))
 
-
 /**
  * @brief Construct an #io_props from its parameters
  *
@@ -94,14 +93,12 @@ struct io_props io_make_input_field_(char name[FIELD_BUFFER_SIZE],
   return r;
 }
 
-
 /**
  * @brief Constructs an #io_props from its parameters
  */
-#define io_make_output_field(name, type, dim, units, part, field) \
-  io_make_output_field_(name, type, dim, units, \
-                        (char*)(&(part[0]).field), sizeof(part[0]))
-
+#define io_make_output_field(name, type, dim, units, part, field)             \
+  io_make_output_field_(name, type, dim, units, (char*)(&(part[0]).field),    \
+                        sizeof(part[0]))
 
 /**
  * @brief Construct an #io_props from its parameters
@@ -116,9 +113,9 @@ struct io_props io_make_input_field_(char name[FIELD_BUFFER_SIZE],
 * Do not call this function directly. Use the macro defined above.
 */
 struct io_props io_make_output_field_(char name[FIELD_BUFFER_SIZE],
-                                     enum DATA_TYPE type, int dimension,
-                                     enum UnitConversionFactor units,
-                                     char* field, size_t partSize) {
+                                      enum DATA_TYPE type, int dimension,
+                                      enum UnitConversionFactor units,
+                                      char* field, size_t partSize) {
   struct io_props r;
   strcpy(r.name, name);
   r.type = type;
@@ -131,6 +128,4 @@ struct io_props io_make_output_field_(char name[FIELD_BUFFER_SIZE],
   return r;
 }
 
-
-
 #endif /* SWIFT_IO_PROPERTIES_H */
diff --git a/src/single_io.c b/src/single_io.c
index 6865d8b7b96a2a98df78c43bba88741256f23467..96b3d26a23043cd2c1b4dd0e4487eb55acdd249f 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -38,6 +38,8 @@
 #include "common_io.h"
 #include "engine.h"
 #include "error.h"
+#include "gravity_io.h"
+#include "hydro_io.h"
 #include "io_properties.h"
 #include "kernel_hydro.h"
 #include "part.h"
@@ -170,36 +172,34 @@ void readArray(hid_t h_grp, const struct io_props prop, size_t N,
  *the part array
  * will be written once the structures have been stabilized.
  */
-void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
-                       char* partTypeGroupName, char* name, enum DATA_TYPE type,
-                       int N, int dim, char* part_c, size_t partSize,
-                       const struct UnitSystem* internal_units,
-                       const struct UnitSystem* snapshot_units,
-                       enum UnitConversionFactor convFactor) {
+void writeArray(hid_t grp, char* fileName, FILE* xmfFile,
+                char* partTypeGroupName, const struct io_props props, size_t N,
+                const struct UnitSystem* internal_units,
+                const struct UnitSystem* snapshot_units) {
 
-  const size_t typeSize = sizeOfType(type);
-  const size_t copySize = typeSize * dim;
-  const size_t num_elements = N * dim;
+  const size_t typeSize = sizeOfType(props.type);
+  const size_t copySize = typeSize * props.dimension;
+  const size_t num_elements = N * props.dimension;
 
-  /* message("Writing '%s' array...", name); */
+  /* message("Writing '%s' array...", props.name); */
 
   /* Allocate temporary buffer */
-  void* temp = malloc(N * dim * sizeOfType(type));
+  void* temp = malloc(num_elements * sizeOfType(props.type));
   if (temp == NULL) error("Unable to allocate memory for temporary buffer");
 
   /* Copy particle data to temporary buffer */
   char* temp_c = temp;
   for (int i = 0; i < N; ++i)
-    memcpy(&temp_c[i * copySize], part_c + i * partSize, copySize);
+    memcpy(&temp_c[i * copySize], props.field + i * props.partSize, copySize);
 
   /* Unit conversion if necessary */
   const double factor =
-      units_conversion_factor(internal_units, snapshot_units, convFactor);
+      units_conversion_factor(internal_units, snapshot_units, props.units);
   if (factor != 1.) {
     message("aaa");
 
-    if (isDoublePrecision(type)) {
+    if (isDoublePrecision(props.type)) {
       double* temp_d = temp;
       for (int i = 0; i < num_elements; ++i) temp_d[i] *= factor;
     } else {
@@ -214,15 +214,15 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   hsize_t shape[2];
   hsize_t chunk_shape[2];
   if (h_space < 0) {
-    error("Error while creating data space for field '%s'.", name);
+    error("Error while creating data space for field '%s'.", props.name);
   }
 
-  if (dim > 1) {
+  if (props.dimension > 1) {
     rank = 2;
     shape[0] = N;
-    shape[1] = dim;
+    shape[1] = props.dimension;
     chunk_shape[0] = 1 << 16; /* Just a guess...*/
-    chunk_shape[1] = dim;
+    chunk_shape[1] = props.dimension;
   } else {
     rank = 1;
     shape[0] = N;
@@ -237,7 +237,7 @@
   /* Change shape of data space */
   hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
   if (h_err < 0) {
-    error("Error while changing data space shape for field '%s'.", name);
+    error("Error while changing data space shape for field '%s'.", props.name);
   }
 
   /* Dataset properties */
@@ -247,40 +247,43 @@
   h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
   if (h_err < 0) {
     error("Error while setting chunk size (%lld, %lld) for field '%s'.",
-          chunk_shape[0], chunk_shape[1], name);
+          chunk_shape[0], chunk_shape[1], props.name);
   }
 
   /* Impose data compression */
   h_err = H5Pset_deflate(h_prop, 4);
   if (h_err < 0) {
-    error("Error while setting compression options for field '%s'.", name);
+    error("Error while setting compression options for field '%s'.",
+          props.name);
   }
 
   /* Create dataset */
-  const hid_t h_data = H5Dcreate(grp, name, hdf5Type(type), h_space,
+  const hid_t h_data = H5Dcreate(grp, props.name, hdf5Type(props.type), h_space,
                                  H5P_DEFAULT, h_prop, H5P_DEFAULT);
   if (h_data < 0) {
-    error("Error while creating dataspace '%s'.", name);
+    error("Error while creating dataspace '%s'.", props.name);
   }
 
   /* Write temporary buffer to HDF5 dataspace */
-  h_err = H5Dwrite(h_data, hdf5Type(type), h_space, H5S_ALL, H5P_DEFAULT, temp);
+  h_err = H5Dwrite(h_data, hdf5Type(props.type), h_space, H5S_ALL, H5P_DEFAULT,
+                   temp);
   if (h_err < 0) {
-    error("Error while writing data array '%s'.", name);
+    error("Error while writing data array '%s'.", props.name);
   }
 
   /* Write XMF description for this data set */
-  writeXMFline(xmfFile, fileName, partTypeGroupName, name, N, dim, type);
+  writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N,
+               props.dimension, props.type);
 
   /* Write unit conversion factors for this data set */
   char buffer[FIELD_BUFFER_SIZE];
-  units_cgs_conversion_string(buffer, snapshot_units, convFactor);
+  units_cgs_conversion_string(buffer, snapshot_units, props.units);
   writeAttribute_d(h_data, "CGS conversion factor",
-                   units_cgs_conversion_factor(snapshot_units, convFactor));
+                   units_cgs_conversion_factor(snapshot_units, props.units));
   writeAttribute_f(h_data, "h-scale exponent",
-                   units_h_factor(snapshot_units, convFactor));
+                   units_h_factor(snapshot_units, props.units));
   writeAttribute_f(h_data, "a-scale exponent",
-                   units_a_factor(snapshot_units, convFactor));
+                   units_a_factor(snapshot_units, props.units));
   writeAttribute_s(h_data, "Conversion factor", buffer);
 
   /* Free and close everything */
@@ -290,41 +293,6 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile,
   H5Sclose(h_space);
 }
 
-/**
- * @brief A helper macro to call the readArrayBackEnd function more easily.
- *
- * @param grp The group in which to write.
- * @param fileName The name of the file in which the data is written
- * @param xmfFile The FILE used to write the XMF description
- * @param name The name of the array to write.
- * @param partTypeGroupName The name of the group containing the particles in
- *the HDF5 file.
- * @param type The #DATA_TYPE of the array.
- * @param N The number of particles to write.
- * @param dim The dimension of the data (1 for scalar, 3 for vector)
- * @param part A (char*) pointer on the first occurrence of the field of
- * interest in the parts array
- * @param N_total Unused parameter in non-MPI mode
- * @param mpi_rank Unused parameter in non-MPI mode
- * @param offset Unused parameter in non-MPI mode
- * @param field The name (code name) of the field to read from.
- * @param internal_units The #UnitSystem used internally
- * @param snapshot_units The #UnitSystem used in the snapshots
- * @param convFactor The UnitConversionFactor for this array
- *
- */
-#define writeArray(grp, fileName, xmfFile, partTypeGroupName, name, type, N,  \
-                   dim, part, N_total, mpi_rank, offset, field,               \
-                   internal_units, snapshot_units, convFactor)                \
-  writeArrayBackEnd(grp, fileName, xmfFile, partTypeGroupName, name, type, N, \
-                    dim, (char*)(&(part[0]).field), sizeof(part[0]),          \
-                    internal_units, snapshot_units, convFactor)
-
-/* Import the right hydro definition */
-#include "hydro_io.h"
-/* Import the right gravity definition */
-#include "gravity_io.h"
-
 /**
  * @brief Reads an HDF5 initial condition file (GADGET-3 type)
  *
@@ -663,15 +631,16 @@
       error("Error while creating particle group.\n");
     }
 
-    /* message("Writing particle arrays..."); */
+    int num_fields = 0;
+    struct io_props list[100];
+    size_t N;
 
     /* Write particle fields from the particle structure */
     switch (ptype) {
 
      case GAS:
-        hydro_write_particles(h_grp, fileName, partTypeGroupName, xmfFile, Ngas,
-                              Ngas, 0, 0, parts, internal_units,
-                              snapshot_units);
+        N = Ngas;
+        hydro_write_particles(parts, list, &num_fields);
        break;
 
      case DM:
@@ -684,19 +653,24 @@
        /* Collect the DM particles from gpart */
        collect_dm_gparts(gparts, Ntot, dmparts, Ndm);
 
-       /* Write DM particles */
-       darkmatter_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
-                                  Ndm, Ndm, 0, 0, dmparts, internal_units,
-                                  snapshot_units);
-
-       /* Free temporary array */
-       free(dmparts);
-       break;
+       /* /\* Write DM particles *\/ */
+       /* darkmatter_write_particles(h_grp, fileName, partTypeGroupName, xmfFile,
+        */
+       /*                            Ndm, Ndm, 0, 0, dmparts, internal_units, */
+       /*                            snapshot_units); */
 
      default:
        error("Particle Type %d not yet supported. Aborting", ptype);
    }
 
+    /* Write everything */
+    for (int i = 0; i < num_fields; ++i)
+      writeArray(h_grp, fileName, xmfFile, partTypeGroupName, list[i], N,
+                 internal_units, snapshot_units);
+
+    /* Free temporary array */
+    free(dmparts);
+
    /* Close particle group */
    H5Gclose(h_grp);
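Note on the pattern introduced above (not part of the diff itself): the output path is now split in two. A scheme-specific *_write_particles() function only fills an array of struct io_props descriptors via the io_make_output_field() macro, and write_output_single() serialises each descriptor through the generic writeArray(). The sketch below restates that pattern and adds one hypothetical extra field ("SmoothingLength", mapped to the member h of struct part) purely to show how further outputs would be registered; that field is not added by this patch.

/* Sketch only -- assumes the declarations from io_properties.h and the
 * particle definitions used in the hunks above. The "SmoothingLength"/h
 * entry is a hypothetical example, not something this patch introduces. */
#include "io_properties.h"

void hydro_write_particles(struct part* parts, struct io_props* list,
                           int* num_fields) {

  *num_fields = 2;

  /* Each descriptor records the dataset name, data type, dimension, unit
   * conversion factor, and the address/stride of the member in struct part. */
  list[0] = io_make_output_field("Coordinates", DOUBLE, 3, UNIT_CONV_LENGTH,
                                 parts, x);
  list[1] = io_make_output_field("SmoothingLength", FLOAT, 1, UNIT_CONV_LENGTH,
                                 parts, h);
}

/* The caller then writes every registered field with one generic loop, as in
 * the write_output_single() hunk above:
 *
 *   for (int i = 0; i < num_fields; ++i)
 *     writeArray(h_grp, fileName, xmfFile, partTypeGroupName, list[i], N,
 *                internal_units, snapshot_units);
 */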