diff --git a/examples/MultiTypes/makeIC.py b/examples/MultiTypes/makeIC.py index 88330a6de25e23bf007615f9e9ca24e66065413c..41a5ef5f2ffc4073ef8a4e93a130b43fcbe2c1f5 100644 --- a/examples/MultiTypes/makeIC.py +++ b/examples/MultiTypes/makeIC.py @@ -39,143 +39,151 @@ Ldm = int(sys.argv[2]) # Number of particles along one axis massStars = 0.1 Lstars = int(sys.argv[3]) # Number of particles along one axis -fileName = "multiTypes.hdf5" +fileBaseName = "multiTypes" +num_files = int(sys.argv[4]) #--------------------------------------------------- -numGas = Lgas**3 -massGas = boxSize**3 * rhoGas / numGas +numGas_tot = Lgas**3 +massGas = boxSize**3 * rhoGas / numGas_tot internalEnergy = P / ((gamma - 1.)*rhoGas) -numDM = Ldm**3 -massDM = boxSize**3 * rhoDM / numDM +numDM_tot = Ldm**3 +massDM = boxSize**3 * rhoDM / numDM_tot -numStars = Lstars**3 +numStars_tot = Lstars**3 massStars = massDM * massStars #-------------------------------------------------- -#File -file = h5py.File(fileName, 'w') - -# Header -grp = file.create_group("/Header") -grp.attrs["BoxSize"] = boxSize -grp.attrs["NumPart_Total"] = [numGas, numDM, 0, 0, numStars, 0] -grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0] -grp.attrs["NumPart_ThisFile"] = [numGas, numDM, 0, 0, numStars, 0] -grp.attrs["Time"] = 0.0 -grp.attrs["NumFilesPerSnapshot"] = 1 -grp.attrs["MassTable"] = [0.0, massDM, 0.0, 0.0, 0.0, 0.0] -grp.attrs["Flag_Entropy_ICs"] = 0 -grp.attrs["Dimension"] = 3 - -#Runtime parameters -grp = file.create_group("/RuntimePars") -grp.attrs["PeriodicBoundariesOn"] = periodic - -#Units -grp = file.create_group("/Units") -grp.attrs["Unit length in cgs (U_L)"] = 1. -grp.attrs["Unit mass in cgs (U_M)"] = 1. -grp.attrs["Unit time in cgs (U_t)"] = 1. -grp.attrs["Unit current in cgs (U_I)"] = 1. -grp.attrs["Unit temperature in cgs (U_T)"] = 1. 
- - -# Gas Particle group -grp = file.create_group("/PartType0") - -v = zeros((numGas, 3)) -ds = grp.create_dataset('Velocities', (numGas, 3), 'f') -ds[()] = v -v = zeros(1) - -m = full((numGas, 1), massGas) -ds = grp.create_dataset('Masses', (numGas,1), 'f') -ds[()] = m -m = zeros(1) - -h = full((numGas, 1), eta * boxSize / Lgas) -ds = grp.create_dataset('SmoothingLength', (numGas,1), 'f') -ds[()] = h -h = zeros(1) - -u = full((numGas, 1), internalEnergy) -ds = grp.create_dataset('InternalEnergy', (numGas,1), 'f') -ds[()] = u -u = zeros(1) - -ids = linspace(0, numGas, numGas, endpoint=False).reshape((numGas,1)) -ds = grp.create_dataset('ParticleIDs', (numGas, 1), 'L') -ds[()] = ids + 1 -x = ids % Lgas; -y = ((ids - x) / Lgas) % Lgas; -z = (ids - x - Lgas * y) / Lgas**2; -coords = zeros((numGas, 3)) -coords[:,0] = z[:,0] * boxSize / Lgas + boxSize / (2*Lgas) -coords[:,1] = y[:,0] * boxSize / Lgas + boxSize / (2*Lgas) -coords[:,2] = x[:,0] * boxSize / Lgas + boxSize / (2*Lgas) -ds = grp.create_dataset('Coordinates', (numGas, 3), 'd') -ds[()] = coords - - - - - -# DM Particle group -grp = file.create_group("/PartType1") - -v = zeros((numDM, 3)) -ds = grp.create_dataset('Velocities', (numDM, 3), 'f') -ds[()] = v -v = zeros(1) - -m = full((numDM, 1), massDM) -ds = grp.create_dataset('Masses', (numDM,1), 'f') -ds[()] = m -m = zeros(1) - -ids = linspace(0, numDM, numDM, endpoint=False).reshape((numDM,1)) -ds = grp.create_dataset('ParticleIDs', (numDM, 1), 'L') -ds[()] = ids + Lgas**3 + 1 -x = ids % Ldm; -y = ((ids - x) / Ldm) % Ldm; -z = (ids - x - Ldm * y) / Ldm**2; -coords = zeros((numDM, 3)) -coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -ds = grp.create_dataset('Coordinates', (numDM, 3), 'd') -ds[()] = coords - - - -# Star Particle group -grp = file.create_group("/PartType4") - -v = zeros((numStars, 3)) -ds = grp.create_dataset('Velocities', (numStars, 3), 'f') -ds[()] = v -v = zeros(1) - -m = full((numStars, 1), massStars) -ds = grp.create_dataset('Masses', (numStars,1), 'f') -ds[()] = m -m = zeros(1) - -ids = linspace(0, numStars, numStars, endpoint=False).reshape((numStars,1)) -ds = grp.create_dataset('ParticleIDs', (numStars, 1), 'L') -ds[()] = ids + Lgas**3 + 1 -x = ids % Ldm; -y = ((ids - x) / Ldm) % Ldm; -z = (ids - x - Ldm * y) / Ldm**2; -coords = zeros((numStars, 3)) -coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm) -ds = grp.create_dataset('Coordinates', (numStars, 3), 'd') -ds[()] = coords - - -file.close() +offsetGas = 0 +offsetDM = 0 +offsetStars = 0 + +for n in range(num_files): + + # File name + if num_files == 1: + fileName = fileBaseName + ".hdf5" + else: + fileName = fileBaseName + ".%d.hdf5"%n + + # File + file = h5py.File(fileName, 'w') + + # Number of particles + numGas = numGas_tot / num_files + numDM = numDM_tot / num_files + numStars = numStars_tot / num_files + + if n == num_files - 1: + numGas += numGas_tot % num_files + numDM += numDM_tot % num_files + numStars += numStars_tot % num_files + + + # Header + grp = file.create_group("/Header") + grp.attrs["BoxSize"] = boxSize + grp.attrs["NumPart_Total"] = [numGas_tot, numDM_tot, 0, 0, numStars_tot, 0] + grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0] + grp.attrs["NumPart_ThisFile"] = [numGas, numDM, 0, 0, numStars, 0] + grp.attrs["Time"] = 0.0 + 
grp.attrs["NumFilesPerSnapshot"] = num_files + grp.attrs["MassTable"] = [0.0, massDM, 0.0, 0.0, 0.0, 0.0] + grp.attrs["Flag_Entropy_ICs"] = 0 + grp.attrs["Dimension"] = 3 + + #Runtime parameters + grp = file.create_group("/RuntimePars") + grp.attrs["PeriodicBoundariesOn"] = periodic + + #Units + grp = file.create_group("/Units") + grp.attrs["Unit length in cgs (U_L)"] = 1. + grp.attrs["Unit mass in cgs (U_M)"] = 1. + grp.attrs["Unit time in cgs (U_t)"] = 1. + grp.attrs["Unit current in cgs (U_I)"] = 1. + grp.attrs["Unit temperature in cgs (U_T)"] = 1. + + + # Gas Particle group + grp = file.create_group("/PartType0") + + v = zeros((numGas, 3)) + ds = grp.create_dataset('Velocities', (numGas, 3), 'f', data=v) + + m = full((numGas, 1), massGas) + ds = grp.create_dataset('Masses', (numGas,1), 'f', data=m) + + h = full((numGas, 1), eta * boxSize / Lgas) + ds = grp.create_dataset('SmoothingLength', (numGas,1), 'f', data=h) + + u = full((numGas, 1), internalEnergy) + ds = grp.create_dataset('InternalEnergy', (numGas,1), 'f', data=u) + + ids = linspace(offsetGas, offsetGas+numGas, numGas, endpoint=False).reshape((numGas,1)) + ds = grp.create_dataset('ParticleIDs', (numGas, 1), 'L', data=ids+1) + x = ids % Lgas; + y = ((ids - x) / Lgas) % Lgas; + z = (ids - x - Lgas * y) / Lgas**2; + coords = zeros((numGas, 3)) + coords[:,0] = z[:,0] * boxSize / Lgas + boxSize / (2*Lgas) + coords[:,1] = y[:,0] * boxSize / Lgas + boxSize / (2*Lgas) + coords[:,2] = x[:,0] * boxSize / Lgas + boxSize / (2*Lgas) + ds = grp.create_dataset('Coordinates', (numGas, 3), 'd', data=coords) + + + + # DM Particle group + grp = file.create_group("/PartType1") + + v = zeros((numDM, 3)) + ds = grp.create_dataset('Velocities', (numDM, 3), 'f', data=v) + + m = full((numDM, 1), massDM) + ds = grp.create_dataset('Masses', (numDM,1), 'f', data=m) + + ids = linspace(offsetDM, offsetDM+numDM, numDM, endpoint=False).reshape((numDM,1)) + ds = grp.create_dataset('ParticleIDs', (numDM, 1), 'L', data=ids + numGas_tot + 1) + ds[()] = ids + Lgas**3 + 1 + x = ids % Ldm; + y = ((ids - x) / Ldm) % Ldm; + z = (ids - x - Ldm * y) / Ldm**2; + coords = zeros((numDM, 3)) + coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + ds = grp.create_dataset('Coordinates', (numDM, 3), 'd', data=coords) + + + + # Star Particle group + grp = file.create_group("/PartType4") + + v = zeros((numStars, 3)) + ds = grp.create_dataset('Velocities', (numStars, 3), 'f', data=v) + + m = full((numStars, 1), massStars) + ds = grp.create_dataset('Masses', (numStars,1), 'f', data=m) + + ids = linspace(0, numStars, numStars, endpoint=False).reshape((numStars,1)) + ds = grp.create_dataset('ParticleIDs', (numStars, 1), 'L', data=ids + numGas_tot + numDM_tot + 1) + x = ids % Ldm; + y = ((ids - x) / Ldm) % Ldm; + z = (ids - x - Ldm * y) / Ldm**2; + coords = zeros((numStars, 3)) + coords[:,0] = z[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + coords[:,1] = y[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + coords[:,2] = x[:,0] * boxSize / Ldm + boxSize / (2*Ldm) + ds = grp.create_dataset('Coordinates', (numStars, 3), 'd', data=coords) + + + + # Shift stuff + offsetGas += numGas + offsetDM += numDM + offsetStars += numStars + + file.close() + diff --git a/examples/MultiTypes/multiTypes.yml b/examples/MultiTypes/multiTypes.yml index 4d54f95fcdd09464b03d0f9987398cd2710b2e44..05d45595e9ec43b0849ed574f6b9922c19021a33 100644 --- a/examples/MultiTypes/multiTypes.yml +++ 
b/examples/MultiTypes/multiTypes.yml @@ -32,6 +32,7 @@ SPH: # Parameters related to the initial conditions InitialConditions: file_name: ./multiTypes.hdf5 # The file to read + replicate: 2 # Replicate all particles twice along each axis # External potential parameters PointMassPotential: diff --git a/examples/MultiTypes/run.sh b/examples/MultiTypes/run.sh index 508a5097f8961f446a51204e889875e33d4f634e..38cba70393861539f18bf9fa360d51f46dd3367d 100755 --- a/examples/MultiTypes/run.sh +++ b/examples/MultiTypes/run.sh @@ -4,7 +4,7 @@ if [ ! -e multiTypes.hdf5 ] then echo "Generating initial conditions for the multitype box example..." - python makeIC.py 17 24 12 + python makeIC.py 9 13 7 1 fi ../swift -s -g -S -t 1 multiTypes.yml 2>&1 | tee output.log diff --git a/examples/main.c b/examples/main.c index 0bb4d03d8e0feb9f5b61adf643741a8974bd9320..967db3c09eba9e8ae372343e76ddfc3a84c0daf0 100644 --- a/examples/main.c +++ b/examples/main.c @@ -54,7 +54,7 @@ struct profiler prof; void print_help_message() { printf("\nUsage: swift [OPTION]... PARAMFILE\n"); - printf(" swift_mpi [OPTION]... PARAMFILE\n"); + printf(" swift_mpi [OPTION]... PARAMFILE\n\n"); printf("Valid options are:\n"); printf(" %2s %8s %s\n", "-a", "", "Pin runners using processor affinity"); @@ -371,6 +371,8 @@ int main(int argc, char *argv[]) { /* Read particles and space information from (GADGET) ICs */ char ICfileName[200] = ""; parser_get_param_string(params, "InitialConditions:file_name", ICfileName); + const int replicate = + parser_get_opt_param_int(params, "InitialConditions:replicate", 1); if (myrank == 0) message("Reading ICs from file '%s'", ICfileName); fflush(stdout); @@ -441,7 +443,7 @@ int main(int argc, char *argv[]) { if (myrank == 0) clocks_gettime(&tic); struct space s; space_init(&s, params, dim, parts, gparts, sparts, Ngas, Ngpart, Nspart, - periodic, with_self_gravity, talking, dry_run); + periodic, replicate, with_self_gravity, talking, dry_run); if (myrank == 0) { clocks_gettime(&toc); message("space_init took %.3f %s.", clocks_diff(&tic, &toc), @@ -458,6 +460,7 @@ int main(int argc, char *argv[]) { s.cdim[1], s.cdim[2]); message("%zi parts in %i cells.", s.nr_parts, s.tot_cells); message("%zi gparts in %i cells.", s.nr_gparts, s.tot_cells); + message("%zi sparts in %i cells.", s.nr_sparts, s.tot_cells); message("maximum depth is %d.", s.maxdepth); fflush(stdout); } @@ -521,6 +524,18 @@ int main(int argc, char *argv[]) { for (k = 0; k < runner_hist_N; k++) runner_hist_bins[k] = 0; #endif +#if defined(WITH_MPI) + N_long[0] = s.nr_parts; + N_long[1] = s.nr_gparts; + N_long[2] = s.nr_sparts; + MPI_Reduce(&N_long, &N_total, 3, MPI_LONG_LONG_INT, MPI_SUM, 0, + MPI_COMM_WORLD); +#else + N_total[0] = s.nr_parts; + N_total[1] = s.nr_gparts; + N_total[2] = s.nr_sparts; +#endif + /* Get some info to the user. */ if (myrank == 0) { long long N_DM = N_total[1] - N_total[2] - N_total[0]; diff --git a/examples/parameter_example.yml b/examples/parameter_example.yml index 9681e880e9f4b44d4f77195eecf5b8642266ce52..31b948ea4c59de42bce7c3b80b8ae2526b10e0cf 100644 --- a/examples/parameter_example.yml +++ b/examples/parameter_example.yml @@ -55,6 +55,7 @@ InitialConditions: shift_x: 0. # (Optional) A shift to apply to all particles read from the ICs (in internal units). shift_y: 0. shift_z: 0. + replicate: 2 # (Optional) Replicate all particles along each axis a given number of times. Default 1. 
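[Editor's note] The new `InitialConditions:replicate` parameter is read in main.c and passed to space_init. Going by its description ("replicate all particles along each axis a given number of times"), `replicate: 2` tiles the box twice per axis, i.e. 2^3 = 8 copies of the particle load in a correspondingly enlarged volume. A rough illustration of that tiling only, not SWIFT's actual implementation (the function and variable names here are made up):

```python
import numpy as np

def replicate_box(coords, box_size, replicate):
    """Tile positions `replicate` times along each axis of a cubic box."""
    copies = []
    for i in range(replicate):
        for j in range(replicate):
            for k in range(replicate):
                shift = np.array([i, j, k], dtype=float) * box_size
                copies.append(coords + shift)   # shifted copy of every particle
    return np.vstack(copies)                    # shape (N * replicate**3, 3)

# Two particles replicated twice along each axis -> 16 particles in a box of side 2.
pos = np.array([[0.1, 0.2, 0.3], [0.7, 0.8, 0.9]])
print(replicate_box(pos, 1.0, 2).shape)         # (16, 3)
```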
# Parameters governing domain decomposition DomainDecomposition: diff --git a/src/Makefile.am b/src/Makefile.am index 515b8aed02e92334e92fb8414a5b4e90db5cbbe1..b0615db01c467f243ba3eb0b961370c31a26fe22 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -45,7 +45,7 @@ include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \ physical_constants.h physical_constants_cgs.h potential.h version.h \ hydro_properties.h riemann.h threadpool.h cooling.h cooling_struct.h sourceterms.h \ sourceterms_struct.h statistics.h memswap.h cache.h runner_doiact_vec.h profiler.h \ - dump.h logger.h active.h timeline.h + dump.h logger.h active.h timeline.h xmf.h # Common source files AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \ @@ -54,7 +54,8 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \ kernel_hydro.c tools.c part.c partition.c clocks.c parser.c \ physical_constants.c potential.c hydro_properties.c \ runner_doiact_fft.c threadpool.c cooling.c sourceterms.c \ - statistics.c runner_doiact_vec.c profiler.c dump.c logger.c + statistics.c runner_doiact_vec.c profiler.c dump.c logger.c \ + part_type.c xmf.c # Include files for distribution, not installation. nobase_noinst_HEADERS = align.h approx_math.h atomic.h cycle.h error.h inline.h kernel_hydro.h kernel_gravity.h \ @@ -109,15 +110,18 @@ version_string.h: version_string.h.in $(AM_SOURCES) $(include_HEADERS) $(noinst_ if test "X$(GIT_CMD)" != "X"; then \ GIT_REVISION=`$(GIT_CMD) describe --abbrev=8 --always --tags --dirty`; \ GIT_BRANCH=`$(GIT_CMD) branch | sed -n 's/^\* \(.*\)/\1/p'`; \ + GIT_DATE=`$(GIT_CMD) log -1 --format=%ci`; \ sed -e "s,@PACKAGE_VERSION\@,$(PACKAGE_VERSION)," \ -e "s,@GIT_REVISION\@,$${GIT_REVISION}," \ -e "s|@GIT_BRANCH\@|$${GIT_BRANCH}|" \ + -e "s|@GIT_DATE\@|$${GIT_DATE}|" \ -e "s|@SWIFT_CFLAGS\@|$(CFLAGS)|" $< > version_string.h; \ else \ if test ! -f version_string.h; then \ sed -e "s,@PACKAGE_VERSION\@,$(PACKAGE_VERSION)," \ -e "s,@GIT_REVISION\@,unknown," \ -e "s,@GIT_BRANCH\@,unknown," \ + -e "s,@GIT_DATE\@,unknown," \ -e "s|@SWIFT_CFLAGS\@|$(CFLAGS)|" $< > version_string.h; \ fi; \ fi diff --git a/src/common_io.c b/src/common_io.c index 82c00cf5bed7118276e0595e3d9c590d29bdda74..567ae7249dba788e6b379f8c05eb1139bff6bb94 100644 --- a/src/common_io.c +++ b/src/common_io.c @@ -47,9 +47,6 @@ #include "units.h" #include "version.h" -const char* particle_type_names[NUM_PARTICLE_TYPES] = { - "Gas", "DM", "Boundary", "Dummy", "Star", "BH"}; - /** * @brief Converts a C data type to the HDF5 equivalent. * @@ -57,7 +54,7 @@ const char* particle_type_names[NUM_PARTICLE_TYPES] = { * to change the exact storage types matching the code types in a transparent *way. */ -hid_t hdf5Type(enum DATA_TYPE type) { +hid_t io_hdf5_type(enum IO_DATA_TYPE type) { switch (type) { case INT: @@ -87,7 +84,7 @@ hid_t hdf5Type(enum DATA_TYPE type) { /** * @brief Returns the memory size of the data type */ -size_t sizeOfType(enum DATA_TYPE type) { +size_t io_sizeof_type(enum IO_DATA_TYPE type) { switch (type) { case INT: @@ -119,7 +116,7 @@ size_t sizeOfType(enum DATA_TYPE type) { * * Returns an error if the type is not FLOAT or DOUBLE */ -int isDoublePrecision(enum DATA_TYPE type) { +int io_is_double_precision(enum IO_DATA_TYPE type) { switch (type) { case FLOAT: @@ -142,7 +139,8 @@ int isDoublePrecision(enum DATA_TYPE type) { * * Calls #error() if an error occurs. 
*/ -void readAttribute(hid_t grp, char* name, enum DATA_TYPE type, void* data) { +void io_read_attribute(hid_t grp, char* name, enum IO_DATA_TYPE type, + void* data) { hid_t h_attr = 0, h_err = 0; h_attr = H5Aopen(grp, name, H5P_DEFAULT); @@ -150,7 +148,7 @@ void readAttribute(hid_t grp, char* name, enum DATA_TYPE type, void* data) { error("Error while opening attribute '%s'", name); } - h_err = H5Aread(h_attr, hdf5Type(type), data); + h_err = H5Aread(h_attr, io_hdf5_type(type), data); if (h_err < 0) { error("Error while reading attribute '%s'", name); } @@ -169,8 +167,8 @@ void readAttribute(hid_t grp, char* name, enum DATA_TYPE type, void* data) { * * Calls #error() if an error occurs. */ -void writeAttribute(hid_t grp, const char* name, enum DATA_TYPE type, - void* data, int num) { +void io_write_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type, + void* data, int num) { hid_t h_space = 0, h_attr = 0, h_err = 0; hsize_t dim[1] = {num}; @@ -184,12 +182,12 @@ void writeAttribute(hid_t grp, const char* name, enum DATA_TYPE type, error("Error while changing dataspace shape for attribute '%s'.", name); } - h_attr = H5Acreate1(grp, name, hdf5Type(type), h_space, H5P_DEFAULT); + h_attr = H5Acreate1(grp, name, io_hdf5_type(type), h_space, H5P_DEFAULT); if (h_attr < 0) { error("Error while creating attribute '%s'.", name); } - h_err = H5Awrite(h_attr, hdf5Type(type), data); + h_err = H5Awrite(h_attr, io_hdf5_type(type), data); if (h_err < 0) { error("Error while reading attribute '%s'.", name); } @@ -208,8 +206,8 @@ void writeAttribute(hid_t grp, const char* name, enum DATA_TYPE type, * * Calls #error() if an error occurs. */ -void writeStringAttribute(hid_t grp, const char* name, const char* str, - int length) { +void io_writeStringAttribute(hid_t grp, const char* name, const char* str, + int length) { hid_t h_space = 0, h_attr = 0, h_err = 0, h_type = 0; h_space = H5Screate(H5S_SCALAR); @@ -248,8 +246,8 @@ void writeStringAttribute(hid_t grp, const char* name, const char* str, * @param name The name of the attribute * @param data The value to write */ -void writeAttribute_d(hid_t grp, const char* name, double data) { - writeAttribute(grp, name, DOUBLE, &data, 1); +void io_write_attribute_d(hid_t grp, const char* name, double data) { + io_write_attribute(grp, name, DOUBLE, &data, 1); } /** @@ -258,8 +256,8 @@ void writeAttribute_d(hid_t grp, const char* name, double data) { * @param name The name of the attribute * @param data The value to write */ -void writeAttribute_f(hid_t grp, const char* name, float data) { - writeAttribute(grp, name, FLOAT, &data, 1); +void io_write_attribute_f(hid_t grp, const char* name, float data) { + io_write_attribute(grp, name, FLOAT, &data, 1); } /** @@ -269,8 +267,8 @@ void writeAttribute_f(hid_t grp, const char* name, float data) { * @param data The value to write */ -void writeAttribute_i(hid_t grp, const char* name, int data) { - writeAttribute(grp, name, INT, &data, 1); +void io_write_attribute_i(hid_t grp, const char* name, int data) { + io_write_attribute(grp, name, INT, &data, 1); } /** @@ -279,8 +277,8 @@ void writeAttribute_i(hid_t grp, const char* name, int data) { * @param name The name of the attribute * @param data The value to write */ -void writeAttribute_l(hid_t grp, const char* name, long data) { - writeAttribute(grp, name, LONG, &data, 1); +void io_write_attribute_l(hid_t grp, const char* name, long data) { + io_write_attribute(grp, name, LONG, &data, 1); } /** @@ -289,8 +287,8 @@ void writeAttribute_l(hid_t grp, const char* name, 
long data) { * @param name The name of the attribute * @param str The string to write */ -void writeAttribute_s(hid_t grp, const char* name, const char* str) { - writeStringAttribute(grp, name, str, strlen(str)); +void io_write_attribute_s(hid_t grp, const char* name, const char* str) { + io_writeStringAttribute(grp, name, str, strlen(str)); } /** @@ -300,7 +298,7 @@ void writeAttribute_s(hid_t grp, const char* name, const char* str) { * * If the 'Units' group does not exist in the ICs, cgs units will be assumed */ -void readUnitSystem(hid_t h_file, struct UnitSystem* us) { +void io_read_UnitSystem(hid_t h_file, struct UnitSystem* us) { hid_t h_grp = H5Gopen(h_file, "/Units", H5P_DEFAULT); @@ -318,14 +316,16 @@ void readUnitSystem(hid_t h_file, struct UnitSystem* us) { } /* Ok, Read the damn thing */ - readAttribute(h_grp, "Unit length in cgs (U_L)", DOUBLE, - &us->UnitLength_in_cgs); - readAttribute(h_grp, "Unit mass in cgs (U_M)", DOUBLE, &us->UnitMass_in_cgs); - readAttribute(h_grp, "Unit time in cgs (U_t)", DOUBLE, &us->UnitTime_in_cgs); - readAttribute(h_grp, "Unit current in cgs (U_I)", DOUBLE, - &us->UnitCurrent_in_cgs); - readAttribute(h_grp, "Unit temperature in cgs (U_T)", DOUBLE, - &us->UnitTemperature_in_cgs); + io_read_attribute(h_grp, "Unit length in cgs (U_L)", DOUBLE, + &us->UnitLength_in_cgs); + io_read_attribute(h_grp, "Unit mass in cgs (U_M)", DOUBLE, + &us->UnitMass_in_cgs); + io_read_attribute(h_grp, "Unit time in cgs (U_t)", DOUBLE, + &us->UnitTime_in_cgs); + io_read_attribute(h_grp, "Unit current in cgs (U_I)", DOUBLE, + &us->UnitCurrent_in_cgs); + io_read_attribute(h_grp, "Unit temperature in cgs (U_T)", DOUBLE, + &us->UnitTemperature_in_cgs); /* Clean up */ H5Gclose(h_grp); @@ -337,23 +337,23 @@ void readUnitSystem(hid_t h_file, struct UnitSystem* us) { * @param us The UnitSystem to dump * @param groupName The name of the HDF5 group to write to */ -void writeUnitSystem(hid_t h_file, const struct UnitSystem* us, - const char* groupName) { +void io_write_UnitSystem(hid_t h_file, const struct UnitSystem* us, + const char* groupName) { hid_t h_grpunit = 0; h_grpunit = H5Gcreate1(h_file, groupName, 0); if (h_grpunit < 0) error("Error while creating Unit System group"); - writeAttribute_d(h_grpunit, "Unit mass in cgs (U_M)", - units_get_base_unit(us, UNIT_MASS)); - writeAttribute_d(h_grpunit, "Unit length in cgs (U_L)", - units_get_base_unit(us, UNIT_LENGTH)); - writeAttribute_d(h_grpunit, "Unit time in cgs (U_t)", - units_get_base_unit(us, UNIT_TIME)); - writeAttribute_d(h_grpunit, "Unit current in cgs (U_I)", - units_get_base_unit(us, UNIT_CURRENT)); - writeAttribute_d(h_grpunit, "Unit temperature in cgs (U_T)", - units_get_base_unit(us, UNIT_TEMPERATURE)); + io_write_attribute_d(h_grpunit, "Unit mass in cgs (U_M)", + units_get_base_unit(us, UNIT_MASS)); + io_write_attribute_d(h_grpunit, "Unit length in cgs (U_L)", + units_get_base_unit(us, UNIT_LENGTH)); + io_write_attribute_d(h_grpunit, "Unit time in cgs (U_t)", + units_get_base_unit(us, UNIT_TIME)); + io_write_attribute_d(h_grpunit, "Unit current in cgs (U_I)", + units_get_base_unit(us, UNIT_CURRENT)); + io_write_attribute_d(h_grpunit, "Unit temperature in cgs (U_T)", + units_get_base_unit(us, UNIT_TEMPERATURE)); H5Gclose(h_grpunit); } @@ -362,30 +362,32 @@ void writeUnitSystem(hid_t h_file, const struct UnitSystem* us, * @brief Writes the code version to the file * @param h_file The (opened) HDF5 file in which to write */ -void writeCodeDescription(hid_t h_file) { +void io_write_code_description(hid_t h_file) { 
hid_t h_grpcode = 0; h_grpcode = H5Gcreate1(h_file, "/Code", 0); if (h_grpcode < 0) error("Error while creating code group"); - writeAttribute_s(h_grpcode, "Code Version", package_version()); - writeAttribute_s(h_grpcode, "Compiler Name", compiler_name()); - writeAttribute_s(h_grpcode, "Compiler Version", compiler_version()); - writeAttribute_s(h_grpcode, "Git Branch", git_branch()); - writeAttribute_s(h_grpcode, "Git Revision", git_revision()); - writeAttribute_s(h_grpcode, "Configuration options", configuration_options()); - writeAttribute_s(h_grpcode, "CFLAGS", compilation_cflags()); - writeAttribute_s(h_grpcode, "HDF5 library version", hdf5_version()); + io_write_attribute_s(h_grpcode, "Code Version", package_version()); + io_write_attribute_s(h_grpcode, "Compiler Name", compiler_name()); + io_write_attribute_s(h_grpcode, "Compiler Version", compiler_version()); + io_write_attribute_s(h_grpcode, "Git Branch", git_branch()); + io_write_attribute_s(h_grpcode, "Git Revision", git_revision()); + io_write_attribute_s(h_grpcode, "Git Date", git_date()); + io_write_attribute_s(h_grpcode, "Configuration options", + configuration_options()); + io_write_attribute_s(h_grpcode, "CFLAGS", compilation_cflags()); + io_write_attribute_s(h_grpcode, "HDF5 library version", hdf5_version()); #ifdef HAVE_FFTW - writeAttribute_s(h_grpcode, "FFTW library version", fftw3_version()); + io_write_attribute_s(h_grpcode, "FFTW library version", fftw3_version()); #endif #ifdef WITH_MPI - writeAttribute_s(h_grpcode, "MPI library", mpi_version()); + io_write_attribute_s(h_grpcode, "MPI library", mpi_version()); #ifdef HAVE_METIS - writeAttribute_s(h_grpcode, "METIS library version", metis_version()); + io_write_attribute_s(h_grpcode, "METIS library version", metis_version()); #endif #else - writeAttribute_s(h_grpcode, "MPI library", "Non-MPI version of SWIFT"); + io_write_attribute_s(h_grpcode, "MPI library", "Non-MPI version of SWIFT"); #endif H5Gclose(h_grpcode); } @@ -405,170 +407,6 @@ void writeCodeDescription(hid_t h_file) { * * @todo Use a proper XML library to avoid stupid copies. */ -FILE* prepareXMFfile(const char* baseName) { - char buffer[1024]; - - char fileName[FILENAME_BUFFER_SIZE]; - char tempFileName[FILENAME_BUFFER_SIZE]; - snprintf(fileName, FILENAME_BUFFER_SIZE, "%s.xmf", baseName); - snprintf(tempFileName, FILENAME_BUFFER_SIZE, "%s_temp.xmf", baseName); - FILE* xmfFile = fopen(fileName, "r"); - FILE* tempFile = fopen(tempFileName, "w"); - - if (xmfFile == NULL) error("Unable to open current XMF file."); - - if (tempFile == NULL) error("Unable to open temporary file."); - - /* First we make a temporary copy of the XMF file and count the lines */ - int counter = 0; - while (fgets(buffer, 1024, xmfFile) != NULL) { - counter++; - fprintf(tempFile, "%s", buffer); - } - fclose(tempFile); - fclose(xmfFile); - - /* We then copy the XMF file back up to the closing lines */ - xmfFile = fopen(fileName, "w"); - tempFile = fopen(tempFileName, "r"); - - if (xmfFile == NULL) error("Unable to open current XMF file."); - - if (tempFile == NULL) error("Unable to open temporary file."); - - int i = 0; - while (fgets(buffer, 1024, tempFile) != NULL && i < counter - 3) { - i++; - fprintf(xmfFile, "%s", buffer); - } - fprintf(xmfFile, "\n"); - fclose(tempFile); - remove(tempFileName); - - return xmfFile; -} - -/** - * @brief Writes the begin of the XMF file - * - * @todo Exploit the XML nature of the XMF format to write a proper XML writer - *and simplify all the XMF-related stuff. 
- */ -void createXMFfile(const char* baseName) { - - char fileName[FILENAME_BUFFER_SIZE]; - snprintf(fileName, FILENAME_BUFFER_SIZE, "%s.xmf", baseName); - FILE* xmfFile = fopen(fileName, "w"); - - fprintf(xmfFile, "<?xml version=\"1.0\" ?> \n"); - fprintf(xmfFile, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []> \n"); - fprintf( - xmfFile, - "<Xdmf xmlns:xi=\"http://www.w3.org/2003/XInclude\" Version=\"2.1\">\n"); - fprintf(xmfFile, "<Domain>\n"); - fprintf(xmfFile, - "<Grid Name=\"TimeSeries\" GridType=\"Collection\" " - "CollectionType=\"Temporal\">\n\n"); - - fprintf(xmfFile, "</Grid>\n"); - fprintf(xmfFile, "</Domain>\n"); - fprintf(xmfFile, "</Xdmf>\n"); - - fclose(xmfFile); -} - -/** - * @brief Writes the part of the XMF entry presenting the geometry of the - *snapshot - * - * @param xmfFile The file to write in. - * @param hdfFileName The name of the HDF5 file corresponding to this output. - * @param time The current simulation time. - */ -void writeXMFoutputheader(FILE* xmfFile, char* hdfFileName, float time) { - /* Write end of file */ - - fprintf(xmfFile, "<!-- XMF description for file: %s -->\n", hdfFileName); - fprintf(xmfFile, - "<Grid GridType=\"Collection\" CollectionType=\"Spatial\">\n"); - fprintf(xmfFile, "<Time Type=\"Single\" Value=\"%f\"/>\n", time); -} - -/** - * @brief Writes the end of the XMF file (closes all open markups) - * - * @param xmfFile The file to write in. - * @param output The number of this output. - * @param time The current simulation time. - */ -void writeXMFoutputfooter(FILE* xmfFile, int output, float time) { - /* Write end of the section of this time step */ - - fprintf(xmfFile, - "\n</Grid> <!-- End of meta-data for output=%03i, time=%f -->\n", - output, time); - fprintf(xmfFile, "\n</Grid> <!-- timeSeries -->\n"); - fprintf(xmfFile, "</Domain>\n"); - fprintf(xmfFile, "</Xdmf>\n"); - - fclose(xmfFile); -} - -void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N, - enum PARTICLE_TYPE ptype) { - fprintf(xmfFile, "\n<Grid Name=\"%s\" GridType=\"Uniform\">\n", - particle_type_names[ptype]); - fprintf(xmfFile, - "<Topology TopologyType=\"Polyvertex\" Dimensions=\"%zu\"/>\n", N); - fprintf(xmfFile, "<Geometry GeometryType=\"XYZ\">\n"); - fprintf(xmfFile, - "<DataItem Dimensions=\"%zu 3\" NumberType=\"Double\" " - "Precision=\"8\" " - "Format=\"HDF\">%s:/PartType%d/Coordinates</DataItem>\n", - N, hdfFileName, (int)ptype); - fprintf(xmfFile, - "</Geometry>\n <!-- Done geometry for %s, start of particle fields " - "list -->\n", - particle_type_names[ptype]); -} - -void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype) { - fprintf(xmfFile, "</Grid> <!-- End of meta-data for parttype=%s -->\n", - particle_type_names[ptype]); -} - -/** - * @brief Writes the lines corresponding to an array of the HDF5 output - * - * @param xmfFile The file in which to write - * @param fileName The name of the HDF5 file associated to this XMF descriptor. - * @param partTypeGroupName The name of the group containing the particles in - *the HDF5 file. - * @param name The name of the array in the HDF5 file. - * @param N The number of particles. - * @param dim The dimension of the quantity (1 for scalars, 3 for vectors). - * @param type The type of the data to write. - * - * @todo Treat the types in a better way. 
- */ -void writeXMFline(FILE* xmfFile, const char* fileName, - const char* partTypeGroupName, const char* name, size_t N, - int dim, enum DATA_TYPE type) { - fprintf(xmfFile, - "<Attribute Name=\"%s\" AttributeType=\"%s\" Center=\"Node\">\n", - name, dim == 1 ? "Scalar" : "Vector"); - if (dim == 1) - fprintf(xmfFile, - "<DataItem Dimensions=\"%zu\" NumberType=\"Double\" " - "Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n", - N, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name); - else - fprintf(xmfFile, - "<DataItem Dimensions=\"%zu %d\" NumberType=\"Double\" " - "Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n", - N, dim, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name); - fprintf(xmfFile, "</Attribute>\n"); -} /** * @brief Prepare the DM particles (in gparts) read in for the addition of the @@ -580,7 +418,7 @@ void writeXMFline(FILE* xmfFile, const char* fileName, * @param gparts The array of #gpart freshly read in. * @param Ndm The number of DM particles read in. */ -void prepare_dm_gparts(struct gpart* const gparts, size_t Ndm) { +void io_prepare_dm_gparts(struct gpart* const gparts, size_t Ndm) { /* Let's give all these gparts a negative id */ for (size_t i = 0; i < Ndm; ++i) { @@ -606,9 +444,9 @@ void prepare_dm_gparts(struct gpart* const gparts, size_t Ndm) { * @param Ngas The number of gas particles read in. * @param Ndm The number of DM particles read in. */ -void duplicate_hydro_gparts(struct part* const parts, - struct gpart* const gparts, size_t Ngas, - size_t Ndm) { +void io_duplicate_hydro_gparts(struct part* const parts, + struct gpart* const gparts, size_t Ngas, + size_t Ndm) { for (size_t i = 0; i < Ngas; ++i) { @@ -644,9 +482,9 @@ void duplicate_hydro_gparts(struct part* const parts, * @param Nstars The number of stars particles read in. * @param Ndm The number of DM and gas particles read in. */ -void duplicate_star_gparts(struct spart* const sparts, - struct gpart* const gparts, size_t Nstars, - size_t Ndm) { +void io_duplicate_star_gparts(struct spart* const sparts, + struct gpart* const gparts, size_t Nstars, + size_t Ndm) { for (size_t i = 0; i < Nstars; ++i) { @@ -678,8 +516,8 @@ void duplicate_star_gparts(struct spart* const sparts, * @param dmparts The array of #gpart containg DM particles to be filled. * @param Ndm The number of DM particles. */ -void collect_dm_gparts(const struct gpart* const gparts, size_t Ntot, - struct gpart* const dmparts, size_t Ndm) { +void io_collect_dm_gparts(const struct gpart* const gparts, size_t Ntot, + struct gpart* const dmparts, size_t Ndm) { size_t count = 0; diff --git a/src/common_io.h b/src/common_io.h index bf1840d497c46f58568d1bed7cb3409f60e047ee..2ebfbbadcfd81ac09275da51a58235fa7e283ee3 100644 --- a/src/common_io.h +++ b/src/common_io.h @@ -23,17 +23,22 @@ /* Config parameters. */ #include "../config.h" -#if defined(HAVE_HDF5) - +/* Local includes. */ #include "part.h" #include "units.h" +#define FIELD_BUFFER_SIZE 200 +#define PARTICLE_GROUP_BUFFER_SIZE 50 +#define FILENAME_BUFFER_SIZE 150 + +#if defined(HAVE_HDF5) + /** * @brief The different types of data used in the GADGET IC files. * * (This is admittedly a poor substitute to C++ templates...) 
*/ -enum DATA_TYPE { +enum IO_DATA_TYPE { INT, LONG, LONGLONG, @@ -45,68 +50,38 @@ enum DATA_TYPE { CHAR }; -/** - * @brief The different particle types present in a GADGET IC file - * - */ -enum PARTICLE_TYPE { - GAS = 0, - DM = 1, - BOUNDARY = 2, - DUMMY = 3, - STAR = 4, - BH = 5, - NUM_PARTICLE_TYPES -}; - -extern const char* particle_type_names[]; - -#define FILENAME_BUFFER_SIZE 150 -#define FIELD_BUFFER_SIZE 200 -#define PARTICLE_GROUP_BUFFER_SIZE 50 - -hid_t hdf5Type(enum DATA_TYPE type); -size_t sizeOfType(enum DATA_TYPE type); -int isDoublePrecision(enum DATA_TYPE type); - -void collect_dm_gparts(const struct gpart* const gparts, size_t Ntot, - struct gpart* const dmparts, size_t Ndm); -void prepare_dm_gparts(struct gpart* const gparts, size_t Ndm); -void duplicate_hydro_gparts(struct part* const parts, - struct gpart* const gparts, size_t Ngas, - size_t Ndm); -void duplicate_star_gparts(struct spart* const sparts, - struct gpart* const gparts, size_t Nstars, - size_t Ndm); +hid_t io_hdf5_type(enum IO_DATA_TYPE type); +size_t io_sizeof_type(enum IO_DATA_TYPE type); +int io_is_double_precision(enum IO_DATA_TYPE type); -void readAttribute(hid_t grp, char* name, enum DATA_TYPE type, void* data); +void io_read_attribute(hid_t grp, char* name, enum IO_DATA_TYPE type, + void* data); -void writeAttribute(hid_t grp, const char* name, enum DATA_TYPE type, - void* data, int num); +void io_write_attribute(hid_t grp, const char* name, enum IO_DATA_TYPE type, + void* data, int num); -void writeAttribute_d(hid_t grp, const char* name, double data); -void writeAttribute_f(hid_t grp, const char* name, float data); -void writeAttribute_i(hid_t grp, const char* name, int data); -void writeAttribute_l(hid_t grp, const char* name, long data); -void writeAttribute_s(hid_t grp, const char* name, const char* str); +void io_write_attribute_d(hid_t grp, const char* name, double data); +void io_write_attribute_f(hid_t grp, const char* name, float data); +void io_write_attribute_i(hid_t grp, const char* name, int data); +void io_write_attribute_l(hid_t grp, const char* name, long data); +void io_write_attribute_s(hid_t grp, const char* name, const char* str); -void createXMFfile(const char* baseName); -FILE* prepareXMFfile(const char* baseName); -void writeXMFoutputheader(FILE* xmfFile, char* hdfFileName, float time); -void writeXMFoutputfooter(FILE* xmfFile, int outputCount, float time); -void writeXMFgroupheader(FILE* xmfFile, char* hdfFileName, size_t N, - enum PARTICLE_TYPE ptype); -void writeXMFgroupfooter(FILE* xmfFile, enum PARTICLE_TYPE ptype); -void writeXMFline(FILE* xmfFile, const char* fileName, - const char* partTypeGroupName, const char* name, size_t N, - int dim, enum DATA_TYPE type); +void io_write_code_description(hid_t h_file); -void writeCodeDescription(hid_t h_file); - -void readUnitSystem(hid_t h_file, struct UnitSystem* us); -void writeUnitSystem(hid_t h_grp, const struct UnitSystem* us, - const char* groupName); +void io_read_UnitSystem(hid_t h_file, struct UnitSystem* us); +void io_write_UnitSystem(hid_t h_grp, const struct UnitSystem* us, + const char* groupName); #endif /* defined HDF5 */ +void io_collect_dm_gparts(const struct gpart* const gparts, size_t Ntot, + struct gpart* const dmparts, size_t Ndm); +void io_prepare_dm_gparts(struct gpart* const gparts, size_t Ndm); +void io_duplicate_hydro_gparts(struct part* const parts, + struct gpart* const gparts, size_t Ngas, + size_t Ndm); +void io_duplicate_star_gparts(struct spart* const sparts, + struct gpart* const gparts, size_t 
Nstars, + size_t Ndm); + #endif /* SWIFT_COMMON_IO_H */ diff --git a/src/hydro/Default/hydro_io.h b/src/hydro/Default/hydro_io.h index bb35c914bcab8787f609b4dd49acd0cc883b4263..62e94b05ffea259aac99d4b3714e0eea7e7c955f 100644 --- a/src/hydro/Default/hydro_io.h +++ b/src/hydro/Default/hydro_io.h @@ -91,21 +91,25 @@ void hydro_write_particles(struct part* parts, struct io_props* list, void writeSPHflavour(hid_t h_grpsph) { /* Viscosity and thermal conduction */ - writeAttribute_s(h_grpsph, "Thermal Conductivity Model", - "Price (2008) without switch"); - writeAttribute_f(h_grpsph, "Thermal Conductivity alpha", - const_conductivity_alpha); - writeAttribute_s(h_grpsph, "Viscosity Model", - "Morris & Monaghan (1997), Rosswog, Davies, Thielemann & " - "Piran (2000) with additional Balsara (1995) switch"); - writeAttribute_f(h_grpsph, "Viscosity alpha_min", const_viscosity_alpha_min); - writeAttribute_f(h_grpsph, "Viscosity alpha_max", const_viscosity_alpha_max); - writeAttribute_f(h_grpsph, "Viscosity beta", 2.f); - writeAttribute_f(h_grpsph, "Viscosity decay length", const_viscosity_length); + io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", + "Price (2008) without switch"); + io_write_attribute_f(h_grpsph, "Thermal Conductivity alpha", + const_conductivity_alpha); + io_write_attribute_s( + h_grpsph, "Viscosity Model", + "Morris & Monaghan (1997), Rosswog, Davies, Thielemann & " + "Piran (2000) with additional Balsara (1995) switch"); + io_write_attribute_f(h_grpsph, "Viscosity alpha_min", + const_viscosity_alpha_min); + io_write_attribute_f(h_grpsph, "Viscosity alpha_max", + const_viscosity_alpha_max); + io_write_attribute_f(h_grpsph, "Viscosity beta", 2.f); + io_write_attribute_f(h_grpsph, "Viscosity decay length", + const_viscosity_length); /* Time integration properties */ - writeAttribute_f(h_grpsph, "Maximal Delta u change over dt", - const_max_u_change); + io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt", + const_max_u_change); } /** diff --git a/src/hydro/Gadget2/hydro_io.h b/src/hydro/Gadget2/hydro_io.h index 162d368dd073be2fd0f06f4ecbc1431fb34e7798..3e46b2351eb6a3871946dd9c69c8d108b10da0df 100644 --- a/src/hydro/Gadget2/hydro_io.h +++ b/src/hydro/Gadget2/hydro_io.h @@ -108,13 +108,13 @@ void hydro_write_particles(struct part* parts, struct io_props* list, void writeSPHflavour(hid_t h_grpsph) { /* Viscosity and thermal conduction */ - writeAttribute_s(h_grpsph, "Thermal Conductivity Model", - "(No treatment) as in Springel (2005)"); - writeAttribute_s( + io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", + "(No treatment) as in Springel (2005)"); + io_write_attribute_s( h_grpsph, "Viscosity Model", "as in Springel (2005), i.e. 
Monaghan (1992) with Balsara (1995) switch"); - writeAttribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha); - writeAttribute_f(h_grpsph, "Viscosity beta", 3.f); + io_write_attribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha); + io_write_attribute_f(h_grpsph, "Viscosity beta", 3.f); } /** diff --git a/src/hydro/Gizmo/hydro_io.h b/src/hydro/Gizmo/hydro_io.h index e5f221ae4345dc519a50d332131ecf296f318338..dcf8cd8fd6cf37c0d7f4ba718746ec7dc3e79b01 100644 --- a/src/hydro/Gizmo/hydro_io.h +++ b/src/hydro/Gizmo/hydro_io.h @@ -143,18 +143,18 @@ void hydro_write_particles(struct part* parts, struct io_props* list, */ void writeSPHflavour(hid_t h_grpsph) { /* Gradient information */ - writeAttribute_s(h_grpsph, "Gradient reconstruction model", - HYDRO_GRADIENT_IMPLEMENTATION); + io_write_attribute_s(h_grpsph, "Gradient reconstruction model", + HYDRO_GRADIENT_IMPLEMENTATION); /* Slope limiter information */ - writeAttribute_s(h_grpsph, "Cell wide slope limiter model", - HYDRO_SLOPE_LIMITER_CELL_IMPLEMENTATION); - writeAttribute_s(h_grpsph, "Piecewise slope limiter model", - HYDRO_SLOPE_LIMITER_FACE_IMPLEMENTATION); + io_write_attribute_s(h_grpsph, "Cell wide slope limiter model", + HYDRO_SLOPE_LIMITER_CELL_IMPLEMENTATION); + io_write_attribute_s(h_grpsph, "Piecewise slope limiter model", + HYDRO_SLOPE_LIMITER_FACE_IMPLEMENTATION); /* Riemann solver information */ - writeAttribute_s(h_grpsph, "Riemann solver type", - RIEMANN_SOLVER_IMPLEMENTATION); + io_write_attribute_s(h_grpsph, "Riemann solver type", + RIEMANN_SOLVER_IMPLEMENTATION); } /** diff --git a/src/hydro/Minimal/hydro_io.h b/src/hydro/Minimal/hydro_io.h index 8c83349a3e17d6b3375663698af7beeeab0636bc..2ec0cb11d1e3ccaaa09d9822c75b396364912df8 100644 --- a/src/hydro/Minimal/hydro_io.h +++ b/src/hydro/Minimal/hydro_io.h @@ -123,13 +123,13 @@ void writeSPHflavour(hid_t h_grpsph) { /* Viscosity and thermal conduction */ /* Nothing in this minimal model... */ - writeAttribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment"); - writeAttribute_s(h_grpsph, "Viscosity Model", - "Minimal treatment as in Monaghan (1992)"); + io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment"); + io_write_attribute_s(h_grpsph, "Viscosity Model", + "Minimal treatment as in Monaghan (1992)"); /* Time integration properties */ - writeAttribute_f(h_grpsph, "Maximal Delta u change over dt", - const_max_u_change); + io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt", + const_max_u_change); } /** diff --git a/src/hydro/PressureEntropy/hydro_io.h b/src/hydro/PressureEntropy/hydro_io.h index fcc8439f64d299b7dcb59e819f8dd273112ce25a..10243750c01d6bc64664f9834bc4cc245c786f49 100644 --- a/src/hydro/PressureEntropy/hydro_io.h +++ b/src/hydro/PressureEntropy/hydro_io.h @@ -123,16 +123,16 @@ void writeSPHflavour(hid_t h_grpsph) { /* Viscosity and thermal conduction */ /* Nothing in this minimal model... */ - writeAttribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment"); - writeAttribute_s( + io_write_attribute_s(h_grpsph, "Thermal Conductivity Model", "No treatment"); + io_write_attribute_s( h_grpsph, "Viscosity Model", "as in Springel (2005), i.e. 
Monaghan (1992) with Balsara (1995) switch"); - writeAttribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha); - writeAttribute_f(h_grpsph, "Viscosity beta", 3.f); + io_write_attribute_f(h_grpsph, "Viscosity alpha", const_viscosity_alpha); + io_write_attribute_f(h_grpsph, "Viscosity beta", 3.f); /* Time integration properties */ - writeAttribute_f(h_grpsph, "Maximal Delta u change over dt", - const_max_u_change); + io_write_attribute_f(h_grpsph, "Maximal Delta u change over dt", + const_max_u_change); } /** diff --git a/src/hydro_properties.c b/src/hydro_properties.c index 815969975b4f5e6b39099e71bbbec4e43c875ddc..8db23a3de0123400fe691b0d60f076929e1b0b48 100644 --- a/src/hydro_properties.c +++ b/src/hydro_properties.c @@ -89,18 +89,19 @@ void hydro_props_print(const struct hydro_props *p) { #if defined(HAVE_HDF5) void hydro_props_print_snapshot(hid_t h_grpsph, const struct hydro_props *p) { - writeAttribute_f(h_grpsph, "Adiabatic index", hydro_gamma); - writeAttribute_i(h_grpsph, "Dimension", (int)hydro_dimension); - writeAttribute_s(h_grpsph, "Scheme", SPH_IMPLEMENTATION); - writeAttribute_s(h_grpsph, "Kernel function", kernel_name); - writeAttribute_f(h_grpsph, "Kernel target N_ngb", p->target_neighbours); - writeAttribute_f(h_grpsph, "Kernel delta N_ngb", p->delta_neighbours); - writeAttribute_f(h_grpsph, "Kernel eta", p->eta_neighbours); - writeAttribute_f(h_grpsph, "CFL parameter", p->CFL_condition); - writeAttribute_f(h_grpsph, "Volume log(max(delta h))", p->log_max_h_change); - writeAttribute_f(h_grpsph, "Volume max change time-step", - pow_dimension(expf(p->log_max_h_change))); - writeAttribute_i(h_grpsph, "Max ghost iterations", - p->max_smoothing_iterations); + io_write_attribute_f(h_grpsph, "Adiabatic index", hydro_gamma); + io_write_attribute_i(h_grpsph, "Dimension", (int)hydro_dimension); + io_write_attribute_s(h_grpsph, "Scheme", SPH_IMPLEMENTATION); + io_write_attribute_s(h_grpsph, "Kernel function", kernel_name); + io_write_attribute_f(h_grpsph, "Kernel target N_ngb", p->target_neighbours); + io_write_attribute_f(h_grpsph, "Kernel delta N_ngb", p->delta_neighbours); + io_write_attribute_f(h_grpsph, "Kernel eta", p->eta_neighbours); + io_write_attribute_f(h_grpsph, "CFL parameter", p->CFL_condition); + io_write_attribute_f(h_grpsph, "Volume log(max(delta h))", + p->log_max_h_change); + io_write_attribute_f(h_grpsph, "Volume max change time-step", + pow_dimension(expf(p->log_max_h_change))); + io_write_attribute_i(h_grpsph, "Max ghost iterations", + p->max_smoothing_iterations); } #endif diff --git a/src/io_properties.h b/src/io_properties.h index af0d81aec8cf4901d2bfcce8cd023a2e04b804bf..4c972ea90c3353c80559a2ee82b9ded571a39d41 100644 --- a/src/io_properties.h +++ b/src/io_properties.h @@ -38,7 +38,7 @@ struct io_props { char name[FIELD_BUFFER_SIZE]; /* Type of the field */ - enum DATA_TYPE type; + enum IO_DATA_TYPE type; /* Dimension (1D, 3D, ...) */ int dimension; @@ -87,7 +87,7 @@ struct io_props { * Do not call this function directly. Use the macro defined above. */ struct io_props io_make_input_field_(char name[FIELD_BUFFER_SIZE], - enum DATA_TYPE type, int dimension, + enum IO_DATA_TYPE type, int dimension, enum DATA_IMPORTANCE importance, enum UnitConversionFactor units, char* field, size_t partSize) { @@ -127,7 +127,7 @@ struct io_props io_make_input_field_(char name[FIELD_BUFFER_SIZE], * Do not call this function directly. Use the macro defined above. 
*/ struct io_props io_make_output_field_(char name[FIELD_BUFFER_SIZE], - enum DATA_TYPE type, int dimension, + enum IO_DATA_TYPE type, int dimension, enum UnitConversionFactor units, char* field, size_t partSize) { struct io_props r; @@ -170,7 +170,7 @@ struct io_props io_make_output_field_(char name[FIELD_BUFFER_SIZE], * Do not call this function directly. Use the macro defined above. */ struct io_props io_make_output_field_convert_part_( - char name[FIELD_BUFFER_SIZE], enum DATA_TYPE type, int dimension, + char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, enum UnitConversionFactor units, char* field, size_t partSize, struct part* parts, float (*functionPtr)(struct engine*, struct part*)) { @@ -214,7 +214,7 @@ struct io_props io_make_output_field_convert_part_( * Do not call this function directly. Use the macro defined above. */ struct io_props io_make_output_field_convert_gpart_( - char name[FIELD_BUFFER_SIZE], enum DATA_TYPE type, int dimension, + char name[FIELD_BUFFER_SIZE], enum IO_DATA_TYPE type, int dimension, enum UnitConversionFactor units, char* field, size_t partSize, struct gpart* gparts, float (*functionPtr)(struct engine*, struct gpart*)) { diff --git a/src/parallel_io.c b/src/parallel_io.c index 8f86b6cc6d04ed7a6ace9f155355f88940ec94d6..d210ad05cefd659934e795bfbf9812fef4f82bd7 100644 --- a/src/parallel_io.c +++ b/src/parallel_io.c @@ -48,6 +48,7 @@ #include "part.h" #include "stars_io.h" #include "units.h" +#include "xmf.h" /** * @brief Reads a data array from a given HDF5 group. @@ -73,7 +74,7 @@ void readArray(hid_t grp, const struct io_props prop, size_t N, const struct UnitSystem* internal_units, const struct UnitSystem* ic_units) { - const size_t typeSize = sizeOfType(prop.type); + const size_t typeSize = io_sizeof_type(prop.type); const size_t copySize = typeSize * prop.dimension; const size_t num_elements = N * prop.dimension; @@ -102,7 +103,7 @@ void readArray(hid_t grp, const struct io_props prop, size_t N, /* Check data type */ const hid_t h_type = H5Dget_type(h_data); if (h_type < 0) error("Unable to retrieve data type from the file"); - /* if (!H5Tequal(h_type, hdf5Type(type))) */ + /* if (!H5Tequal(h_type, hdf5_type(type))) */ /* error("Non-matching types between the code and the file"); */ /* Allocate temporary buffer */ @@ -140,7 +141,7 @@ void readArray(hid_t grp, const struct io_props prop, size_t N, /* Read HDF5 dataspace in temporary buffer */ /* Dirty version that happens to work for vectors but should be improved */ /* Using HDF5 dataspaces would be better */ - const hid_t h_err = H5Dread(h_data, hdf5Type(prop.type), h_memspace, + const hid_t h_err = H5Dread(h_data, io_hdf5_type(prop.type), h_memspace, h_filespace, h_plist_id, temp); if (h_err < 0) { error("Error while reading data array '%s'.", prop.name); @@ -153,7 +154,7 @@ void readArray(hid_t grp, const struct io_props prop, size_t N, /* message("Converting ! 
factor=%e", factor); */ - if (isDoublePrecision(prop.type)) { + if (io_is_double_precision(prop.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -203,14 +204,14 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, const struct UnitSystem* internal_units, const struct UnitSystem* snapshot_units) { - const size_t typeSize = sizeOfType(props.type); + const size_t typeSize = io_sizeof_type(props.type); const size_t copySize = typeSize * props.dimension; const size_t num_elements = N * props.dimension; /* message("Writing '%s' array...", props.name); */ /* Allocate temporary buffer */ - void* temp = malloc(num_elements * sizeOfType(props.type)); + void* temp = malloc(num_elements * io_sizeof_type(props.type)); if (temp == NULL) error("Unable to allocate memory for temporary buffer"); /* Copy particle data to temporary buffer */ @@ -241,7 +242,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, /* message("Converting ! factor=%e", factor); */ - if (isDoublePrecision(props.type)) { + if (io_is_double_precision(props.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -300,8 +301,8 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, /* Create dataset */ const hid_t h_data = - H5Dcreate(grp, props.name, hdf5Type(props.type), h_filespace, H5P_DEFAULT, - H5P_DEFAULT, H5P_DEFAULT); + H5Dcreate(grp, props.name, io_hdf5_type(props.type), h_filespace, + H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); if (h_data < 0) { error("Error while creating dataset '%s'.", props.name); } @@ -315,7 +316,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE); /* Write temporary buffer to HDF5 dataspace */ - h_err = H5Dwrite(h_data, hdf5Type(props.type), h_memspace, h_filespace, + h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace, h_plist_id, temp); if (h_err < 0) { error("Error while writing data array '%s'.", props.name); @@ -323,19 +324,20 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, /* Write XMF description for this data set */ if (mpi_rank == 0) - writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N_total, - props.dimension, props.type); + xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total, + props.dimension, props.type); /* Write unit conversion factors for this data set */ char buffer[FIELD_BUFFER_SIZE]; units_cgs_conversion_string(buffer, snapshot_units, props.units); - writeAttribute_d(h_data, "CGS conversion factor", - units_cgs_conversion_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "h-scale exponent", - units_h_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "a-scale exponent", - units_a_factor(snapshot_units, props.units)); - writeAttribute_s(h_data, "Conversion factor", buffer); + io_write_attribute_d( + h_data, "CGS conversion factor", + units_cgs_conversion_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "h-scale exponent", + units_h_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "a-scale exponent", + units_a_factor(snapshot_units, props.units)); + io_write_attribute_s(h_data, "Conversion factor", buffer); /* Free and close everything */ free(temp); @@ -383,13 +385,13 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, hid_t h_file = 0, h_grp = 
0; /* GADGET has only cubic boxes (in cosmological mode) */ double boxSize[3] = {0.0, -1.0, -1.0}; - long long numParticles[NUM_PARTICLE_TYPES] = {0}; - long long numParticles_highWord[NUM_PARTICLE_TYPES] = {0}; - size_t N[NUM_PARTICLE_TYPES] = {0}; - long long N_total[NUM_PARTICLE_TYPES] = {0}; - long long offset[NUM_PARTICLE_TYPES] = {0}; + long long numParticles[swift_type_count] = {0}; + long long numParticles_highWord[swift_type_count] = {0}; + size_t N[swift_type_count] = {0}; + long long N_total[swift_type_count] = {0}; + long long offset[swift_type_count] = {0}; int dimension = 3; /* Assume 3D if nothing is specified */ - size_t Ndm; + size_t Ndm = 0; /* Open file */ /* message("Opening file '%s' as IC.", fileName); */ @@ -406,7 +408,7 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, if (h_grp < 0) error("Error while opening runtime parameters\n"); /* Read the relevant information */ - readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); + io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic); /* Close runtime parameters */ H5Gclose(h_grp); @@ -420,21 +422,21 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, const hid_t hid_dim = H5Aexists(h_grp, "Dimension"); if (hid_dim < 0) error("Error while testing existance of 'Dimension' attribute"); - if (hid_dim > 0) readAttribute(h_grp, "Dimension", INT, &dimension); + if (hid_dim > 0) io_read_attribute(h_grp, "Dimension", INT, &dimension); if (dimension != hydro_dimension) error("ICs dimensionality (%dD) does not match code dimensionality (%dD)", dimension, (int)hydro_dimension); /* Read the relevant information and print status */ int flag_entropy_temp[6]; - readAttribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); + io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); *flag_entropy = flag_entropy_temp[0]; - readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); - readAttribute(h_grp, "NumPart_Total", LONGLONG, numParticles); - readAttribute(h_grp, "NumPart_Total_HighWord", LONGLONG, - numParticles_highWord); + io_read_attribute(h_grp, "BoxSize", DOUBLE, boxSize); + io_read_attribute(h_grp, "NumPart_Total", LONGLONG, numParticles); + io_read_attribute(h_grp, "NumPart_Total_HighWord", LONGLONG, + numParticles_highWord); - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) + for (int ptype = 0; ptype < swift_type_count; ++ptype) N_total[ptype] = (numParticles[ptype]) + (numParticles_highWord[ptype] << 32); @@ -447,7 +449,7 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* dim[1], dim[2]); */ /* Divide the particles among the tasks. 
*/ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) { + for (int ptype = 0; ptype < swift_type_count; ++ptype) { offset[ptype] = mpi_rank * N_total[ptype] / mpi_size; N[ptype] = (mpi_rank + 1) * N_total[ptype] / mpi_size - offset[ptype]; } @@ -458,7 +460,7 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* Read the unit system used in the ICs */ struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem)); if (ic_units == NULL) error("Unable to allocate memory for IC unit system"); - readUnitSystem(h_file, ic_units); + io_read_UnitSystem(h_file, ic_units); /* Tell the user if a conversion will be needed */ if (mpi_rank == 0) { @@ -507,7 +509,7 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store star particles */ if (with_stars) { - *Nstars = N[STAR]; + *Nstars = N[swift_type_star]; if (posix_memalign((void*)sparts, spart_align, *Nstars * sizeof(struct spart)) != 0) error("Error while allocating memory for star particles"); @@ -517,7 +519,9 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store gravity particles */ if (with_gravity) { Ndm = N[1]; - *Ngparts = (with_hydro ? N[GAS] : 0) + N[DM] + (with_stars ? N[STAR] : 0); + *Ngparts = (with_hydro ? N[swift_type_gas] : 0) + + N[swift_type_dark_matter] + + (with_stars ? N[swift_type_star] : 0); if (posix_memalign((void*)gparts, gpart_align, *Ngparts * sizeof(struct gpart)) != 0) error("Error while allocating memory for gravity particles"); @@ -531,7 +535,7 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* message("NumPart = [%zd, %zd] Total = %zd", *Ngas, Ndm, *Ngparts); */ /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N_total[ptype] == 0) continue; @@ -552,21 +556,21 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, /* Read particle fields into the particle structure */ switch (ptype) { - case GAS: + case swift_type_gas: if (with_hydro) { Nparticles = *Ngas; hydro_read_particles(*parts, list, &num_fields); } break; - case DM: + case swift_type_dark_matter: if (with_gravity) { Nparticles = Ndm; darkmatter_read_particles(*gparts, list, &num_fields); } break; - case STAR: + case swift_type_star: if (with_stars) { Nparticles = *Nstars; star_read_particles(*sparts, list, &num_fields); @@ -588,15 +592,15 @@ void read_ic_parallel(char* fileName, const struct UnitSystem* internal_units, } /* Prepare the DM particles */ - if (!dry_run && with_gravity) prepare_dm_gparts(*gparts, Ndm); + if (!dry_run && with_gravity) io_prepare_dm_gparts(*gparts, Ndm); /* Duplicate the hydro particles into gparts */ if (!dry_run && with_gravity && with_hydro) - duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); + io_duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); /* Duplicate the star particles into gparts */ if (!dry_run && with_gravity && with_stars) - duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); + io_duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); /* message("Done Reading particles..."); */ @@ -659,10 +663,10 @@ void write_output_parallel(struct engine* e, const char* baseName, outputCount); /* First time, we need to create the XMF file */ - if (outputCount == 0 && mpi_rank == 0) createXMFfile(baseName); + if (outputCount == 0 && mpi_rank == 
0) xmf_create_file(baseName); /* Prepare the XMF file for the new entry */ - if (mpi_rank == 0) xmfFile = prepareXMFfile(baseName); + if (mpi_rank == 0) xmfFile = xmf_prepare_file(baseName); /* Open HDF5 file */ hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); @@ -674,11 +678,11 @@ void write_output_parallel(struct engine* e, const char* baseName, /* Compute offset in the file and total number of * particles */ - size_t N[NUM_PARTICLE_TYPES] = {Ngas, Ndm, 0, 0, Nstars, 0}; - long long N_total[NUM_PARTICLE_TYPES] = {0}; - long long offset[NUM_PARTICLE_TYPES] = {0}; - MPI_Exscan(&N, &offset, NUM_PARTICLE_TYPES, MPI_LONG_LONG_INT, MPI_SUM, comm); - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) + size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0}; + long long N_total[swift_type_count] = {0}; + long long offset[swift_type_count] = {0}; + MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm); + for (int ptype = 0; ptype < swift_type_count; ++ptype) N_total[ptype] = offset[ptype] + N[ptype]; /* The last rank now has the correct N_total. Let's @@ -691,7 +695,7 @@ void write_output_parallel(struct engine* e, const char* baseName, /* Write the part of the XMF file corresponding to this * specific output */ - if (mpi_rank == 0) writeXMFoutputheader(xmfFile, fileName, e->time); + if (mpi_rank == 0) xmf_write_outputheader(xmfFile, fileName, e->time); /* Open header to write simulation properties */ /* message("Writing runtime parameters..."); */ @@ -700,7 +704,7 @@ void write_output_parallel(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating runtime parameters group\n"); /* Write the relevant information */ - writeAttribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); + io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); /* Close runtime parameters */ H5Gclose(h_grp); @@ -711,39 +715,39 @@ void write_output_parallel(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating file header\n"); /* Print the relevant information and print status */ - writeAttribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); + io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); double dblTime = e->time; - writeAttribute(h_grp, "Time", DOUBLE, &dblTime, 1); + io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1); int dimension = (int)hydro_dimension; - writeAttribute(h_grp, "Dimension", INT, &dimension, 1); + io_write_attribute(h_grp, "Dimension", INT, &dimension, 1); /* GADGET-2 legacy values */ /* Number of particles of each type */ - unsigned int numParticles[NUM_PARTICLE_TYPES] = {0}; - unsigned int numParticlesHighWord[NUM_PARTICLE_TYPES] = {0}; - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) { + unsigned int numParticles[swift_type_count] = {0}; + unsigned int numParticlesHighWord[swift_type_count] = {0}; + for (int ptype = 0; ptype < swift_type_count; ++ptype) { numParticles[ptype] = (unsigned int)N_total[ptype]; numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32); } - writeAttribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total", UINT, numParticles, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticlesHighWord, - NUM_PARTICLE_TYPES); + io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT, + 
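The exclusive scan in the hunk above is what turns per-rank particle counts into file offsets: each rank receives the sum of the counts held by the ranks before it, so offset + N on the last rank is the global total, which is then broadcast back to everyone. A self-contained sketch of that pattern for a single particle type, with error handling omitted (the per-rank count is invented for illustration):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);

  int mpi_rank, mpi_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

  /* Pretend every rank owns a different number of particles. */
  long long N = 100 + 10 * mpi_rank;

  /* Exclusive prefix sum: offset of this rank's block in the global file.
     (offset is pre-set to 0 because MPI_Exscan leaves rank 0 undefined.) */
  long long offset = 0;
  MPI_Exscan(&N, &offset, 1, MPI_LONG_LONG_INT, MPI_SUM, MPI_COMM_WORLD);

  /* Only the last rank holds the full total; broadcast it from there. */
  long long N_total = offset + N;
  MPI_Bcast(&N_total, 1, MPI_LONG_LONG_INT, mpi_size - 1, MPI_COMM_WORLD);

  printf("rank %d: offset=%lld count=%lld total=%lld\n", mpi_rank, offset, N,
         N_total);

  MPI_Finalize();
  return 0;
}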
numParticlesHighWord, swift_type_count); double MassTable[6] = {0., 0., 0., 0., 0., 0.}; - writeAttribute(h_grp, "MassTable", DOUBLE, MassTable, NUM_PARTICLE_TYPES); - unsigned int flagEntropy[NUM_PARTICLE_TYPES] = {0}; + io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count); + unsigned int flagEntropy[swift_type_count] = {0}; flagEntropy[0] = writeEntropyFlag(); - writeAttribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); + io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, + swift_type_count); + io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); /* Close header */ H5Gclose(h_grp); /* Print the code version */ - writeCodeDescription(h_file); + io_write_code_description(h_file); /* Print the SPH parameters */ h_grp = @@ -761,10 +765,10 @@ void write_output_parallel(struct engine* e, const char* baseName, H5Gclose(h_grp); /* Print the system of Units used in the spashot */ - writeUnitSystem(h_file, snapshot_units, "Units"); + io_write_UnitSystem(h_file, snapshot_units, "Units"); /* Print the system of Units used internally */ - writeUnitSystem(h_file, internal_units, "InternalCodeUnits"); + io_write_UnitSystem(h_file, internal_units, "InternalCodeUnits"); /* Tell the user if a conversion will be needed */ if (e->verbose && mpi_rank == 0) { @@ -800,7 +804,7 @@ void write_output_parallel(struct engine* e, const char* baseName, } /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N_total[ptype] == 0) continue; @@ -808,8 +812,8 @@ void write_output_parallel(struct engine* e, const char* baseName, /* Add the global information for that particle type to * the XMF meta-file */ if (mpi_rank == 0) - writeXMFgroupheader(xmfFile, fileName, N_total[ptype], - (enum PARTICLE_TYPE)ptype); + xmf_write_groupheader(xmfFile, fileName, N_total[ptype], + (enum part_type)ptype); /* Open the particle group in the file */ char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE]; @@ -828,12 +832,12 @@ void write_output_parallel(struct engine* e, const char* baseName, /* Write particle fields from the particle structure */ switch (ptype) { - case GAS: + case swift_type_gas: Nparticles = Ngas; hydro_write_particles(parts, list, &num_fields); break; - case DM: + case swift_type_dark_matter: /* Allocate temporary array */ if (posix_memalign((void*)&dmparts, gpart_align, Ndm * sizeof(struct gpart)) != 0) @@ -843,14 +847,14 @@ void write_output_parallel(struct engine* e, const char* baseName, bzero(dmparts, Ndm * sizeof(struct gpart)); /* Collect the DM particles from gpart */ - collect_dm_gparts(gparts, Ntot, dmparts, Ndm); + io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm); /* Write DM particles */ Nparticles = Ndm; darkmatter_write_particles(dmparts, list, &num_fields); break; - case STAR: + case swift_type_star: Nparticles = Nstars; star_write_particles(sparts, list, &num_fields); break; @@ -875,11 +879,11 @@ void write_output_parallel(struct engine* e, const char* baseName, H5Gclose(h_grp); /* Close this particle group in the XMF file as well */ - if (mpi_rank == 0) writeXMFgroupfooter(xmfFile, (enum PARTICLE_TYPE)ptype); + if (mpi_rank == 0) xmf_write_groupfooter(xmfFile, (enum part_type)ptype); } /* Write LXMF file descriptor */ - if (mpi_rank == 0) writeXMFoutputfooter(xmfFile, outputCount, e->time); + if (mpi_rank == 
0) xmf_write_outputfooter(xmfFile, outputCount, e->time); /* message("Done writing particles..."); */ diff --git a/src/parser.c b/src/parser.c index 879605760bedb5e4bb282788c2392edf0d3a64df..41a3e8637630eceb3beb9383acb3344028d38659 100644 --- a/src/parser.c +++ b/src/parser.c @@ -722,6 +722,6 @@ void parser_write_params_to_file(const struct swift_params *params, void parser_write_params_to_hdf5(const struct swift_params *params, hid_t grp) { for (int i = 0; i < params->paramCount; i++) - writeAttribute_s(grp, params->data[i].name, params->data[i].value); + io_write_attribute_s(grp, params->data[i].name, params->data[i].value); } #endif diff --git a/src/part_type.c b/src/part_type.c new file mode 100644 index 0000000000000000000000000000000000000000..af97bd34aaace93a9faa953c0c9345d83ca3bc34 --- /dev/null +++ b/src/part_type.c @@ -0,0 +1,24 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk). + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ + +/* This object's header. */ +#include "part_type.h" + +const char* part_type_names[swift_type_count] = {"Gas", "DM", "Dummy", + "Dummy", "Star", "BH"}; diff --git a/src/part_type.h b/src/part_type.h index 2c564d6908c8887e8fa8a5197a0a92ed85cbe5bb..fbe2b2aeaea37503635372b0f09f8edde4578721 100644 --- a/src/part_type.h +++ b/src/part_type.h @@ -28,7 +28,10 @@ enum part_type { swift_type_gas = 0, swift_type_dark_matter = 1, swift_type_star = 4, - swift_type_black_hole = 5 + swift_type_black_hole = 5, + swift_type_count } __attribute__((packed)); +extern const char* part_type_names[]; + #endif /* SWIFT_PART_TYPES_H */ diff --git a/src/runner.c b/src/runner.c index bffb25d934f79f20316f69f8fe69befa8f9f9023..b8cb2bcaa37629613a31aab63ca74a45817e7ec7 100644 --- a/src/runner.c +++ b/src/runner.c @@ -591,7 +591,6 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { struct part *restrict parts = c->parts; struct xpart *restrict xparts = c->xparts; - int redo, count = c->count; const struct engine *e = r->e; const float target_wcount = e->hydro_properties->target_neighbours; const float max_wcount = @@ -599,6 +598,7 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { const float min_wcount = target_wcount - e->hydro_properties->delta_neighbours; const int max_smoothing_iter = e->hydro_properties->max_smoothing_iterations; + int redo = 0, count = 0; TIMER_TIC; @@ -611,11 +611,15 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { if (c->progeny[k] != NULL) runner_do_ghost(r, c->progeny[k], 0); } else { - /* Init the IDs that have to be updated. */ + /* Init the list of active particles that have to be updated. 
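The part_type.h hunk above ends the enum with a swift_type_count sentinel, which is what lets every formerly NUM_PARTICLE_TYPES-sized array and loop in the I/O code be sized from the enum itself, while part_type_names from the new part_type.c gives a printable label per type. A minimal standalone illustration of the pattern; the enum values and the names table are copied from the hunks above, everything else is only for demonstration:

#include <stdio.h>

/* Mirrors src/part_type.h: the last enumerator doubles as the array size. */
enum part_type {
  swift_type_gas = 0,
  swift_type_dark_matter = 1,
  swift_type_star = 4,
  swift_type_black_hole = 5,
  swift_type_count
} __attribute__((packed));

/* Mirrors src/part_type.c */
const char *part_type_names[swift_type_count] = {"Gas",   "DM",   "Dummy",
                                                 "Dummy", "Star", "BH"};

int main(void) {
  /* Example per-type counts, sized by the sentinel like N[] in the readers. */
  long long N[swift_type_count] = {64, 128, 0, 0, 8, 0};

  for (int ptype = 0; ptype < swift_type_count; ++ptype)
    printf("%-5s : %lld particles\n", part_type_names[ptype], N[ptype]);

  return 0;
}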
*/ int *pid = NULL; - if ((pid = malloc(sizeof(int) * count)) == NULL) + if ((pid = malloc(sizeof(int) * c->count)) == NULL) error("Can't allocate memory for pid."); - for (int k = 0; k < count; k++) pid[k] = k; + for (int k = 0; k < c->count; k++) + if (part_is_active(&parts[k], e)) { + pid[count] = k; + ++count; + } /* While there are particles that need to be updated... */ for (int num_reruns = 0; count > 0 && num_reruns < max_smoothing_iter; @@ -624,18 +628,23 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { /* Reset the redo-count. */ redo = 0; - /* Loop over the parts in this cell. */ + /* Loop over the remaining active parts in this cell. */ for (int i = 0; i < count; i++) { /* Get a direct pointer on the part. */ struct part *restrict p = &parts[pid[i]]; struct xpart *restrict xp = &xparts[pid[i]]; +#ifdef SWIFT_DEBUG_CHECKS /* Is this part within the timestep? */ - if (part_is_active(p, e)) { + if (!part_is_active(p, e)) error("Ghost applied to inactive particle"); +#endif + + /* Finish the density calculation */ + hydro_end_density(p); - /* Finish the density calculation */ - hydro_end_density(p); + /* Did we get the right number of neighbours? */ + if (p->density.wcount > max_wcount || p->density.wcount < min_wcount) { float h_corr = 0.f; @@ -651,37 +660,32 @@ void runner_do_ghost(struct runner *r, struct cell *c, int timer) { h_corr = (h_corr > -0.5f * p->h) ? h_corr : -0.5f * p->h; } - /* Did we get the right number density? */ - if (p->density.wcount > max_wcount || - p->density.wcount < min_wcount) { + /* Ok, correct then */ + p->h += h_corr; - /* Ok, correct then */ - p->h += h_corr; + /* Flag for another round of fun */ + pid[redo] = pid[i]; + redo += 1; - /* Flag for another round of fun */ - pid[redo] = pid[i]; - redo += 1; + /* Re-initialise everything */ + hydro_init_part(p); - /* Re-initialise everything */ - hydro_init_part(p); - - /* Off we go ! */ - continue; - } + /* Off we go ! */ + continue; + } - /* We now have a particle whose smoothing length has converged */ + /* We now have a particle whose smoothing length has converged */ - /* As of here, particle force variables will be set. */ + /* As of here, particle force variables will be set. */ - /* Compute variables required for the force loop */ - hydro_prepare_force(p, xp); + /* Compute variables required for the force loop */ + hydro_prepare_force(p, xp); - /* The particle force values are now set. Do _NOT_ - try to read any particle density variables! */ + /* The particle force values are now set. Do _NOT_ + try to read any particle density variables! */ - /* Prepare the particle for the force loop over neighbours */ - hydro_reset_acceleration(p); - } + /* Prepare the particle for the force loop over neighbours */ + hydro_reset_acceleration(p); } /* We now need to treat the particles whose smoothing length had not diff --git a/src/runner_doiact.h b/src/runner_doiact.h index 6fa04018088a05ed0319489e88677c3ebcabd0f2..5ebd36ef7bd14564160ff63f356b2e532b09fc22 100644 --- a/src/runner_doiact.h +++ b/src/runner_doiact.h @@ -776,145 +776,153 @@ void DOPAIR1(struct runner *r, struct cell *ci, struct cell *cj) { const double dj_min = sort_j[0].d; const float dx_max = (ci->dx_max + cj->dx_max); - /* Loop over the parts in ci. */ - for (int pid = count_i - 1; - pid >= 0 && sort_i[pid].d + hi_max + dx_max > dj_min; pid--) { + if (cell_is_active(ci, e)) { - /* Get a hold of the ith part in ci. 
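The runner_do_ghost rewrite above no longer seeds pid[] with every index and filters inside the iteration loop; it compacts the indices of active particles once, so count only ever tracks particles whose smoothing length still needs updating. A generic sketch of that compaction idiom, with a stand-in predicate in place of part_is_active() (all names other than pid/count are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for part_is_active(&parts[k], e): here, "active" = odd value. */
static int is_active(int value) { return value % 2 != 0; }

int main(void) {
  const int cell_count = 8;                      /* plays the role of c->count */
  int values[8] = {3, 4, 7, 10, 11, 12, 15, 20}; /* plays the role of c->parts */

  /* Allocate for the worst case, then keep only the active entries. */
  int *pid = malloc(sizeof(int) * cell_count);
  if (pid == NULL) {
    fprintf(stderr, "Can't allocate memory for pid.\n");
    return 1;
  }

  int count = 0;
  for (int k = 0; k < cell_count; k++)
    if (is_active(values[k])) {
      pid[count] = k;
      ++count;
    }

  /* Later passes (the smoothing-length reruns) loop over pid[0..count). */
  for (int i = 0; i < count; i++) printf("active index %d\n", pid[i]);

  free(pid);
  return 0;
}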
*/ - struct part *restrict pi = &parts_i[sort_i[pid].i]; - if (!part_is_active(pi, e)) continue; - const float hi = pi->h; - const double di = sort_i[pid].d + hi * kernel_gamma + dx_max - rshift; - if (di < dj_min) continue; + /* Loop over the parts in ci. */ + for (int pid = count_i - 1; + pid >= 0 && sort_i[pid].d + hi_max + dx_max > dj_min; pid--) { - double pix[3]; - for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; - const float hig2 = hi * hi * kernel_gamma2; + /* Get a hold of the ith part in ci. */ + struct part *restrict pi = &parts_i[sort_i[pid].i]; + if (!part_is_active(pi, e)) continue; + const float hi = pi->h; + const double di = sort_i[pid].d + hi * kernel_gamma + dx_max - rshift; + if (di < dj_min) continue; - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j && sort_j[pjd].d < di; pjd++) { + double pix[3]; + for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; + const float hig2 = hi * hi * kernel_gamma2; - /* Get a pointer to the jth particle. */ - struct part *restrict pj = &parts_j[sort_j[pjd].i]; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j && sort_j[pjd].d < di; pjd++) { - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pix[k] - pj->x[k]; - r2 += dx[k] * dx[k]; - } + /* Get a pointer to the jth particle. */ + struct part *restrict pj = &parts_j[sort_j[pjd].i]; + + /* Compute the pairwise distance. */ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pix[k] - pj->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hig2) { + /* Hit or miss? */ + if (r2 < hig2) { #ifndef WITH_OLD_VECTORIZATION - IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); + IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); #else - /* Add this interaction to the queue. */ - r2q[icount] = r2; - dxq[3 * icount + 0] = dx[0]; - dxq[3 * icount + 1] = dx[1]; - dxq[3 * icount + 2] = dx[2]; - hiq[icount] = hi; - hjq[icount] = pj->h; - piq[icount] = pi; - pjq[icount] = pj; - icount += 1; + /* Add this interaction to the queue. */ + r2q[icount] = r2; + dxq[3 * icount + 0] = dx[0]; + dxq[3 * icount + 1] = dx[1]; + dxq[3 * icount + 2] = dx[2]; + hiq[icount] = hi; + hjq[icount] = pj->h; + piq[icount] = pi; + pjq[icount] = pj; + icount += 1; - /* Flush? */ - if (icount == VEC_SIZE) { - IACT_NONSYM_VEC(r2q, dxq, hiq, hjq, piq, pjq); - icount = 0; - } + /* Flush? */ + if (icount == VEC_SIZE) { + IACT_NONSYM_VEC(r2q, dxq, hiq, hjq, piq, pjq); + icount = 0; + } #endif - } + } - } /* loop over the parts in cj. */ + } /* loop over the parts in cj. */ - } /* loop over the parts in ci. */ + } /* loop over the parts in ci. */ - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j && sort_j[pjd].d - hj_max - dx_max < di_max; - pjd++) { + } /* Cell ci is active */ - /* Get a hold of the jth part in cj. 
*/ - struct part *restrict pj = &parts_j[sort_j[pjd].i]; - if (!part_is_active(pj, e)) continue; - const float hj = pj->h; - const double dj = sort_j[pjd].d - hj * kernel_gamma - dx_max - rshift; - if (dj > di_max) continue; + if (cell_is_active(cj, e)) { - double pjx[3]; - for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; - const float hjg2 = hj * hj * kernel_gamma2; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j && sort_j[pjd].d - hj_max - dx_max < di_max; + pjd++) { - /* Loop over the parts in ci. */ - for (int pid = count_i - 1; pid >= 0 && sort_i[pid].d > dj; pid--) { + /* Get a hold of the jth part in cj. */ + struct part *restrict pj = &parts_j[sort_j[pjd].i]; + if (!part_is_active(pj, e)) continue; + const float hj = pj->h; + const double dj = sort_j[pjd].d - hj * kernel_gamma - dx_max - rshift; + if (dj > di_max) continue; - /* Get a pointer to the jth particle. */ - struct part *restrict pi = &parts_i[sort_i[pid].i]; + double pjx[3]; + for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; + const float hjg2 = hj * hj * kernel_gamma2; - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pjx[k] - pi->x[k]; - r2 += dx[k] * dx[k]; - } + /* Loop over the parts in ci. */ + for (int pid = count_i - 1; pid >= 0 && sort_i[pid].d > dj; pid--) { + + /* Get a pointer to the jth particle. */ + struct part *restrict pi = &parts_i[sort_i[pid].i]; + + /* Compute the pairwise distance. */ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pjx[k] - pi->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hjg2) { + /* Hit or miss? */ + if (r2 < hjg2) { #ifndef WITH_OLD_VECTORIZATION - IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); + IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); #else - /* Add this interaction to the queue. */ - r2q[icount] = r2; - dxq[3 * icount + 0] = dx[0]; - dxq[3 * icount + 1] = dx[1]; - dxq[3 * icount + 2] = dx[2]; - hiq[icount] = hj; - hjq[icount] = pi->h; - piq[icount] = pj; - pjq[icount] = pi; - icount += 1; + /* Add this interaction to the queue. */ + r2q[icount] = r2; + dxq[3 * icount + 0] = dx[0]; + dxq[3 * icount + 1] = dx[1]; + dxq[3 * icount + 2] = dx[2]; + hiq[icount] = hj; + hjq[icount] = pi->h; + piq[icount] = pj; + pjq[icount] = pi; + icount += 1; - /* Flush? */ - if (icount == VEC_SIZE) { - IACT_NONSYM_VEC(r2q, dxq, hiq, hjq, piq, pjq); - icount = 0; - } + /* Flush? */ + if (icount == VEC_SIZE) { + IACT_NONSYM_VEC(r2q, dxq, hiq, hjq, piq, pjq); + icount = 0; + } #endif - } + } + + } /* loop over the parts in ci. */ } /* loop over the parts in cj. */ - } /* loop over the parts in ci. */ + } /* Cell cj is active */ #ifdef WITH_OLD_VECTORIZATION /* Pick up any leftovers. 
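The DOPAIR1 restructuring above keeps the per-particle part_is_active() tests but adds a cheaper cell-level guard, cell_is_active(ci, e) and cell_is_active(cj, e), so a cell with nothing to update skips its outer loop entirely. Reduced to its control-flow skeleton, with toy types standing in for struct cell / struct engine and the sorted-distance cut-offs, debug checks and interaction body all omitted, the shape is roughly this (purely illustrative, not SWIFT code):

#include <stdio.h>

/* Toy stand-ins for struct part / struct cell; only what the skeleton needs. */
struct toy_part { int active; };
struct toy_cell { int count; struct toy_part *parts; int active; };

/* Stand-ins for cell_is_active() / part_is_active(). */
static int toy_cell_is_active(const struct toy_cell *c) { return c->active; }
static int toy_part_is_active(const struct toy_part *p) { return p->active; }

static void toy_dopair(struct toy_cell *ci, struct toy_cell *cj) {
  /* First pass: update parts in ci, skipped when ci is entirely inactive. */
  if (toy_cell_is_active(ci)) {
    for (int pid = 0; pid < ci->count; pid++) {
      if (!toy_part_is_active(&ci->parts[pid])) continue;
      for (int pjd = 0; pjd < cj->count; pjd++) {
        /* ...pairwise distance check and the non-symmetric kernel call here... */
        printf("update ci[%d] with cj[%d]\n", pid, pjd);
      }
    }
  }

  /* Second pass: update parts in cj, skipped when cj is entirely inactive. */
  if (toy_cell_is_active(cj)) {
    for (int pjd = 0; pjd < cj->count; pjd++) {
      if (!toy_part_is_active(&cj->parts[pjd])) continue;
      for (int pid = 0; pid < ci->count; pid++) {
        printf("update cj[%d] with ci[%d]\n", pjd, pid);
      }
    }
  }
}

int main(void) {
  struct toy_part pi[2] = {{1}, {0}}, pj[2] = {{0}, {0}};
  struct toy_cell ci = {2, pi, 1}, cj = {2, pj, 0}; /* cj entirely inactive */
  toy_dopair(&ci, &cj);
  return 0;
}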
*/ diff --git a/src/runner_doiact_nosort.h b/src/runner_doiact_nosort.h index d38f01c6955e2ee9848698d2b46d3f4a14ad0873..beb7fd7026c23c3a7a516b31d23334095bfddae3 100644 --- a/src/runner_doiact_nosort.h +++ b/src/runner_doiact_nosort.h @@ -27,91 +27,99 @@ void DOPAIR1_NOSORT(struct runner *r, struct cell *ci, struct cell *cj) { struct part *restrict parts_i = ci->parts; struct part *restrict parts_j = cj->parts; - /* Loop over the parts in ci. */ - for (int pid = 0; pid < count_i; pid++) { + if (cell_is_active(ci, e)) { - /* Get a hold of the ith part in ci. */ - struct part *restrict pi = &parts_i[pid]; - if (!part_is_active(pi, e)) continue; - const float hi = pi->h; + /* Loop over the parts in ci. */ + for (int pid = 0; pid < count_i; pid++) { - double pix[3]; - for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; - const float hig2 = hi * hi * kernel_gamma2; + /* Get a hold of the ith part in ci. */ + struct part *restrict pi = &parts_i[pid]; + if (!part_is_active(pi, e)) continue; + const float hi = pi->h; - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j; pjd++) { + double pix[3]; + for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; + const float hig2 = hi * hi * kernel_gamma2; - /* Get a pointer to the jth particle. */ - struct part *restrict pj = &parts_j[pjd]; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j; pjd++) { - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pix[k] - pj->x[k]; - r2 += dx[k] * dx[k]; - } + /* Get a pointer to the jth particle. */ + struct part *restrict pj = &parts_j[pjd]; + + /* Compute the pairwise distance. */ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pix[k] - pj->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hig2) { - IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); - } + /* Hit or miss? */ + if (r2 < hig2) { + IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); + } - } /* loop over the parts in cj. */ + } /* loop over the parts in cj. */ - } /* loop over the parts in ci. */ + } /* loop over the parts in ci. */ - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j; pjd++) { + } /* Cell ci is active */ - /* Get a hold of the ith part in ci. */ - struct part *restrict pj = &parts_j[pjd]; - if (!part_is_active(pj, e)) continue; - const float hj = pj->h; + if (cell_is_active(cj, e)) { - double pjx[3]; - for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; - const float hjg2 = hj * hj * kernel_gamma2; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j; pjd++) { - /* Loop over the parts in ci. */ - for (int pid = 0; pid < count_i; pid++) { + /* Get a hold of the ith part in ci. */ + struct part *restrict pj = &parts_j[pjd]; + if (!part_is_active(pj, e)) continue; + const float hj = pj->h; - /* Get a pointer to the jth particle. 
*/ - struct part *restrict pi = &parts_i[pid]; + double pjx[3]; + for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; + const float hjg2 = hj * hj * kernel_gamma2; - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pjx[k] - pi->x[k]; - r2 += dx[k] * dx[k]; - } + /* Loop over the parts in ci. */ + for (int pid = 0; pid < count_i; pid++) { + + /* Get a pointer to the jth particle. */ + struct part *restrict pi = &parts_i[pid]; + + /* Compute the pairwise distance. */ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pjx[k] - pi->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hjg2) { - IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); - } + /* Hit or miss? */ + if (r2 < hjg2) { + IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); + } - } /* loop over the parts in ci. */ + } /* loop over the parts in ci. */ + + } /* loop over the parts in cj. */ - } /* loop over the parts in cj. */ + } /* Cell cj is active */ TIMER_TOC(TIMER_DOPAIR); } @@ -144,93 +152,101 @@ void DOPAIR2_NOSORT(struct runner *r, struct cell *ci, struct cell *cj) { struct part *restrict parts_i = ci->parts; struct part *restrict parts_j = cj->parts; - /* Loop over the parts in ci. */ - for (int pid = 0; pid < count_i; pid++) { + if (cell_is_active(ci, e)) { - /* Get a hold of the ith part in ci. */ - struct part *restrict pi = &parts_i[pid]; - if (!part_is_active(pi, e)) continue; - const float hi = pi->h; + /* Loop over the parts in ci. */ + for (int pid = 0; pid < count_i; pid++) { - double pix[3]; - for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; - const float hig2 = hi * hi * kernel_gamma2; + /* Get a hold of the ith part in ci. */ + struct part *restrict pi = &parts_i[pid]; + if (!part_is_active(pi, e)) continue; + const float hi = pi->h; - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j; pjd++) { + double pix[3]; + for (int k = 0; k < 3; k++) pix[k] = pi->x[k] - shift[k]; + const float hig2 = hi * hi * kernel_gamma2; - /* Get a pointer to the jth particle. */ - struct part *restrict pj = &parts_j[pjd]; - const float hjg2 = pj->h * pj->h * kernel_gamma2; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j; pjd++) { - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pix[k] - pj->x[k]; - r2 += dx[k] * dx[k]; - } + /* Get a pointer to the jth particle. */ + struct part *restrict pj = &parts_j[pjd]; + const float hjg2 = pj->h * pj->h * kernel_gamma2; + + /* Compute the pairwise distance. 
*/ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pix[k] - pj->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hig2 || r2 < hjg2) { - IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); - } + /* Hit or miss? */ + if (r2 < hig2 || r2 < hjg2) { + IACT_NONSYM(r2, dx, hi, pj->h, pi, pj); + } - } /* loop over the parts in cj. */ + } /* loop over the parts in cj. */ - } /* loop over the parts in ci. */ + } /* loop over the parts in ci. */ - /* Loop over the parts in cj. */ - for (int pjd = 0; pjd < count_j; pjd++) { + } /* Cell ci is active */ - /* Get a hold of the ith part in ci. */ - struct part *restrict pj = &parts_j[pjd]; - if (!part_is_active(pj, e)) continue; - const float hj = pj->h; + if (cell_is_active(cj, e)) { - double pjx[3]; - for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; - const float hjg2 = hj * hj * kernel_gamma2; + /* Loop over the parts in cj. */ + for (int pjd = 0; pjd < count_j; pjd++) { - /* Loop over the parts in ci. */ - for (int pid = 0; pid < count_i; pid++) { + /* Get a hold of the ith part in ci. */ + struct part *restrict pj = &parts_j[pjd]; + if (!part_is_active(pj, e)) continue; + const float hj = pj->h; - /* Get a pointer to the jth particle. */ - struct part *restrict pi = &parts_i[pid]; - const float hig2 = pi->h * pi->h * kernel_gamma2; + double pjx[3]; + for (int k = 0; k < 3; k++) pjx[k] = pj->x[k] + shift[k]; + const float hjg2 = hj * hj * kernel_gamma2; - /* Compute the pairwise distance. */ - float r2 = 0.0f; - float dx[3]; - for (int k = 0; k < 3; k++) { - dx[k] = pjx[k] - pi->x[k]; - r2 += dx[k] * dx[k]; - } + /* Loop over the parts in ci. */ + for (int pid = 0; pid < count_i; pid++) { + + /* Get a pointer to the jth particle. */ + struct part *restrict pi = &parts_i[pid]; + const float hig2 = pi->h * pi->h * kernel_gamma2; + + /* Compute the pairwise distance. */ + float r2 = 0.0f; + float dx[3]; + for (int k = 0; k < 3; k++) { + dx[k] = pjx[k] - pi->x[k]; + r2 += dx[k] * dx[k]; + } #ifdef SWIFT_DEBUG_CHECKS - /* Check that particles have been drifted to the current time */ - if (pj->ti_drift != e->ti_current) - error("Particle pj not drifted to current time"); - if (pi->ti_drift != e->ti_current) - error("Particle pi not drifted to current time"); + /* Check that particles have been drifted to the current time */ + if (pj->ti_drift != e->ti_current) + error("Particle pj not drifted to current time"); + if (pi->ti_drift != e->ti_current) + error("Particle pi not drifted to current time"); #endif - /* Hit or miss? */ - if (r2 < hjg2 || r2 < hig2) { - IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); - } + /* Hit or miss? */ + if (r2 < hjg2 || r2 < hig2) { + IACT_NONSYM(r2, dx, hj, pi->h, pj, pi); + } - } /* loop over the parts in ci. */ + } /* loop over the parts in ci. */ + + } /* loop over the parts in cj. */ - } /* loop over the parts in cj. 
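Both _NOSORT pair functions above share the same inner test: accumulate the squared separation component by component and compare it with the squared kernel support radius, using both radii in the symmetric DOPAIR2 variant. A free-standing version of just that test; the kernel_gamma value below is only an illustrative placeholder, since the real constant depends on the kernel SWIFT was configured with:

#include <stdio.h>

/* Illustrative value only; SWIFT defines kernel_gamma per kernel choice. */
#define KERNEL_GAMMA 1.825742
#define KERNEL_GAMMA2 (KERNEL_GAMMA * KERNEL_GAMMA)

/* Do two particles at xi/xj with smoothing lengths hi/hj interact
   symmetrically, i.e. does either kernel support contain the other? */
static int pair_interacts(const double xi[3], const double xj[3], float hi,
                          float hj) {
  float r2 = 0.0f;
  float dx[3];
  for (int k = 0; k < 3; k++) {
    dx[k] = (float)(xi[k] - xj[k]);
    r2 += dx[k] * dx[k];
  }
  const float hig2 = hi * hi * KERNEL_GAMMA2;
  const float hjg2 = hj * hj * KERNEL_GAMMA2;
  return (r2 < hig2 || r2 < hjg2);
}

int main(void) {
  const double xi[3] = {0.0, 0.0, 0.0}, xj[3] = {0.5, 0.0, 0.0};
  printf("interact: %d\n", pair_interacts(xi, xj, 0.2f, 0.3f));
  return 0;
}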
*/ + } /* Cell cj is active */ TIMER_TOC(TIMER_DOPAIR); } diff --git a/src/serial_io.c b/src/serial_io.c index 17b3fb2d839a123ca51f3da5f8893fdd1b690ba4..03f7df82c549b49c76799470bab733e0a6820759 100644 --- a/src/serial_io.c +++ b/src/serial_io.c @@ -48,6 +48,7 @@ #include "part.h" #include "stars_io.h" #include "units.h" +#include "xmf.h" /*----------------------------------------------------------------------------- * Routines reading an IC file @@ -76,7 +77,7 @@ void readArray(hid_t grp, const struct io_props props, size_t N, const struct UnitSystem* internal_units, const struct UnitSystem* ic_units) { - const size_t typeSize = sizeOfType(props.type); + const size_t typeSize = io_sizeof_type(props.type); const size_t copySize = typeSize * props.dimension; const size_t num_elements = N * props.dimension; @@ -105,7 +106,7 @@ void readArray(hid_t grp, const struct io_props props, size_t N, /* Check data type */ const hid_t h_type = H5Dget_type(h_data); if (h_type < 0) error("Unable to retrieve data type from the file"); - /* if (!H5Tequal(h_type, hdf5Type(type))) */ + /* if (!H5Tequal(h_type, hdf5_type(type))) */ /* error("Non-matching types between the code and the file"); */ /* Allocate temporary buffer */ @@ -139,7 +140,7 @@ void readArray(hid_t grp, const struct io_props props, size_t N, /* Read HDF5 dataspace in temporary buffer */ /* Dirty version that happens to work for vectors but should be improved */ /* Using HDF5 dataspaces would be better */ - const hid_t h_err = H5Dread(h_data, hdf5Type(props.type), h_memspace, + const hid_t h_err = H5Dread(h_data, io_hdf5_type(props.type), h_memspace, h_filespace, H5P_DEFAULT, temp); if (h_err < 0) { error("Error while reading data array '%s'.", props.name); @@ -152,7 +153,7 @@ void readArray(hid_t grp, const struct io_props props, size_t N, /* message("Converting ! 
factor=%e", factor); */ - if (isDoublePrecision(props.type)) { + if (io_is_double_precision(props.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -236,26 +237,27 @@ void prepareArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, } /* Create dataset */ - const hid_t h_data = H5Dcreate(grp, props.name, hdf5Type(props.type), h_space, - H5P_DEFAULT, h_prop, H5P_DEFAULT); + const hid_t h_data = H5Dcreate(grp, props.name, io_hdf5_type(props.type), + h_space, H5P_DEFAULT, h_prop, H5P_DEFAULT); if (h_data < 0) { error("Error while creating dataspace '%s'.", props.name); } /* Write XMF description for this data set */ - writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N_total, - props.dimension, props.type); + xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total, + props.dimension, props.type); /* Write unit conversion factors for this data set */ char buffer[FIELD_BUFFER_SIZE]; units_cgs_conversion_string(buffer, snapshot_units, props.units); - writeAttribute_d(h_data, "CGS conversion factor", - units_cgs_conversion_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "h-scale exponent", - units_h_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "a-scale exponent", - units_a_factor(snapshot_units, props.units)); - writeAttribute_s(h_data, "Conversion factor", buffer); + io_write_attribute_d( + h_data, "CGS conversion factor", + units_cgs_conversion_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "h-scale exponent", + units_h_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "a-scale exponent", + units_a_factor(snapshot_units, props.units)); + io_write_attribute_s(h_data, "Conversion factor", buffer); /* Close everything */ H5Pclose(h_prop); @@ -288,7 +290,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, const struct UnitSystem* internal_units, const struct UnitSystem* snapshot_units) { - const size_t typeSize = sizeOfType(props.type); + const size_t typeSize = io_sizeof_type(props.type); const size_t copySize = typeSize * props.dimension; const size_t num_elements = N * props.dimension; @@ -300,7 +302,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, internal_units, snapshot_units); /* Allocate temporary buffer */ - void* temp = malloc(num_elements * sizeOfType(props.type)); + void* temp = malloc(num_elements * io_sizeof_type(props.type)); if (temp == NULL) error("Unable to allocate memory for temporary buffer"); /* Copy particle data to temporary buffer */ @@ -331,7 +333,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, /* message("Converting ! 
factor=%e", factor); */ - if (isDoublePrecision(props.type)) { + if (io_is_double_precision(props.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -379,7 +381,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL); /* Write temporary buffer to HDF5 dataspace */ - h_err = H5Dwrite(h_data, hdf5Type(props.type), h_memspace, h_filespace, + h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace, H5P_DEFAULT, temp); if (h_err < 0) error("Error while writing data array '%s'.", props.name); @@ -434,13 +436,13 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* GADGET has only cubic boxes (in cosmological mode) */ double boxSize[3] = {0.0, -1.0, -1.0}; /* GADGET has 6 particle types. We only keep the type 0 & 1 for now*/ - long long numParticles[NUM_PARTICLE_TYPES] = {0}; - long long numParticles_highWord[NUM_PARTICLE_TYPES] = {0}; - size_t N[NUM_PARTICLE_TYPES] = {0}; - long long N_total[NUM_PARTICLE_TYPES] = {0}; - long long offset[NUM_PARTICLE_TYPES] = {0}; + long long numParticles[swift_type_count] = {0}; + long long numParticles_highWord[swift_type_count] = {0}; + size_t N[swift_type_count] = {0}; + long long N_total[swift_type_count] = {0}; + long long offset[swift_type_count] = {0}; int dimension = 3; /* Assume 3D if nothing is specified */ - size_t Ndm; + size_t Ndm = 0; struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem)); /* First read some information about the content */ @@ -458,7 +460,7 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, if (h_grp < 0) error("Error while opening runtime parameters\n"); /* Read the relevant information */ - readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); + io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic); /* Close runtime parameters */ H5Gclose(h_grp); @@ -472,21 +474,21 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, const hid_t hid_dim = H5Aexists(h_grp, "Dimension"); if (hid_dim < 0) error("Error while testing existance of 'Dimension' attribute"); - if (hid_dim > 0) readAttribute(h_grp, "Dimension", INT, &dimension); + if (hid_dim > 0) io_read_attribute(h_grp, "Dimension", INT, &dimension); if (dimension != hydro_dimension) error("ICs dimensionality (%dD) does not match code dimensionality (%dD)", dimension, (int)hydro_dimension); /* Read the relevant information and print status */ int flag_entropy_temp[6]; - readAttribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); + io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); *flag_entropy = flag_entropy_temp[0]; - readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); - readAttribute(h_grp, "NumPart_Total", LONGLONG, numParticles); - readAttribute(h_grp, "NumPart_Total_HighWord", LONGLONG, - numParticles_highWord); + io_read_attribute(h_grp, "BoxSize", DOUBLE, boxSize); + io_read_attribute(h_grp, "NumPart_Total", LONGLONG, numParticles); + io_read_attribute(h_grp, "NumPart_Total_HighWord", LONGLONG, + numParticles_highWord); - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) + for (int ptype = 0; ptype < swift_type_count; ++ptype) N_total[ptype] = (numParticles[ptype]) + (numParticles_highWord[ptype] << 32); @@ -505,7 +507,7 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* Read the unit system used in the ICs */ if (ic_units == NULL) 
error("Unable to allocate memory for IC unit system"); - readUnitSystem(h_file, ic_units); + io_read_UnitSystem(h_file, ic_units); if (units_are_equal(ic_units, internal_units)) { @@ -547,12 +549,12 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* Now need to broadcast that information to all ranks. */ MPI_Bcast(flag_entropy, 1, MPI_INT, 0, comm); MPI_Bcast(periodic, 1, MPI_INT, 0, comm); - MPI_Bcast(&N_total, NUM_PARTICLE_TYPES, MPI_LONG_LONG_INT, 0, comm); + MPI_Bcast(&N_total, swift_type_count, MPI_LONG_LONG_INT, 0, comm); MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm); MPI_Bcast(ic_units, sizeof(struct UnitSystem), MPI_BYTE, 0, comm); /* Divide the particles among the tasks. */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) { + for (int ptype = 0; ptype < swift_type_count; ++ptype) { offset[ptype] = mpi_rank * N_total[ptype] / mpi_size; N[ptype] = (mpi_rank + 1) * N_total[ptype] / mpi_size - offset[ptype]; } @@ -568,7 +570,7 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store star particles */ if (with_stars) { - *Nstars = N[STAR]; + *Nstars = N[swift_type_star]; if (posix_memalign((void*)sparts, spart_align, *Nstars * sizeof(struct spart)) != 0) error("Error while allocating memory for star particles"); @@ -578,7 +580,9 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store all gravity particles */ if (with_gravity) { Ndm = N[1]; - *Ngparts = (with_hydro ? N[GAS] : 0) + N[DM] + (with_stars ? N[STAR] : 0); + *Ngparts = (with_hydro ? N[swift_type_gas] : 0) + + N[swift_type_dark_matter] + + (with_stars ? N[swift_type_star] : 0); if (posix_memalign((void*)gparts, gpart_align, *Ngparts * sizeof(struct gpart)) != 0) error("Error while allocating memory for gravity particles"); @@ -604,7 +608,7 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, error("Error while opening file '%s' on rank %d.", fileName, mpi_rank); /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N[ptype] == 0) continue; @@ -625,21 +629,21 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, /* Read particle fields into the particle structure */ switch (ptype) { - case GAS: + case swift_type_gas: if (with_hydro) { Nparticles = *Ngas; hydro_read_particles(*parts, list, &num_fields); } break; - case DM: + case swift_type_dark_matter: if (with_gravity) { Nparticles = Ndm; darkmatter_read_particles(*gparts, list, &num_fields); } break; - case STAR: + case swift_type_star: if (with_stars) { Nparticles = *Nstars; star_read_particles(*sparts, list, &num_fields); @@ -670,15 +674,15 @@ void read_ic_serial(char* fileName, const struct UnitSystem* internal_units, } /* Prepare the DM particles */ - if (!dry_run && with_gravity) prepare_dm_gparts(*gparts, Ndm); + if (!dry_run && with_gravity) io_prepare_dm_gparts(*gparts, Ndm); /* Duplicate the hydro particles into gparts */ if (!dry_run && with_gravity && with_hydro) - duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); + io_duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); /* Duplicate the star particles into gparts */ if (!dry_run && with_gravity && with_stars) - duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); + io_duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); /* message("Done Reading 
particles..."); */ @@ -733,11 +737,11 @@ void write_output_serial(struct engine* e, const char* baseName, outputCount); /* Compute offset in the file and total number of particles */ - size_t N[NUM_PARTICLE_TYPES] = {Ngas, Ndm, 0, 0, Nstars, 0}; - long long N_total[NUM_PARTICLE_TYPES] = {0}; - long long offset[NUM_PARTICLE_TYPES] = {0}; - MPI_Exscan(&N, &offset, NUM_PARTICLE_TYPES, MPI_LONG_LONG_INT, MPI_SUM, comm); - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) + size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0}; + long long N_total[swift_type_count] = {0}; + long long offset[swift_type_count] = {0}; + MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm); + for (int ptype = 0; ptype < swift_type_count; ++ptype) N_total[ptype] = offset[ptype] + N[ptype]; /* The last rank now has the correct N_total. Let's broadcast from there */ @@ -750,13 +754,13 @@ void write_output_serial(struct engine* e, const char* baseName, if (mpi_rank == 0) { /* First time, we need to create the XMF file */ - if (outputCount == 0) createXMFfile(baseName); + if (outputCount == 0) xmf_create_file(baseName); /* Prepare the XMF file for the new entry */ - xmfFile = prepareXMFfile(baseName); + xmfFile = xmf_prepare_file(baseName); /* Write the part corresponding to this specific output */ - writeXMFoutputheader(xmfFile, fileName, e->time); + xmf_write_outputheader(xmfFile, fileName, e->time); /* Open file */ /* message("Opening file '%s'.", fileName); */ @@ -772,7 +776,7 @@ void write_output_serial(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating runtime parameters group\n"); /* Write the relevant information */ - writeAttribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); + io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); /* Close runtime parameters */ H5Gclose(h_grp); @@ -783,39 +787,39 @@ void write_output_serial(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating file header\n"); /* Print the relevant information and print status */ - writeAttribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); + io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); double dblTime = e->time; - writeAttribute(h_grp, "Time", DOUBLE, &dblTime, 1); + io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1); int dimension = (int)hydro_dimension; - writeAttribute(h_grp, "Dimension", INT, &dimension, 1); + io_write_attribute(h_grp, "Dimension", INT, &dimension, 1); /* GADGET-2 legacy values */ /* Number of particles of each type */ - unsigned int numParticles[NUM_PARTICLE_TYPES] = {0}; - unsigned int numParticlesHighWord[NUM_PARTICLE_TYPES] = {0}; - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) { + unsigned int numParticles[swift_type_count] = {0}; + unsigned int numParticlesHighWord[swift_type_count] = {0}; + for (int ptype = 0; ptype < swift_type_count; ++ptype) { numParticles[ptype] = (unsigned int)N_total[ptype]; numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32); } - writeAttribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total", UINT, numParticles, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticlesHighWord, - NUM_PARTICLE_TYPES); + io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT, + 
numParticlesHighWord, swift_type_count); double MassTable[6] = {0., 0., 0., 0., 0., 0.}; - writeAttribute(h_grp, "MassTable", DOUBLE, MassTable, NUM_PARTICLE_TYPES); - unsigned int flagEntropy[NUM_PARTICLE_TYPES] = {0}; + io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count); + unsigned int flagEntropy[swift_type_count] = {0}; flagEntropy[0] = writeEntropyFlag(); - writeAttribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); + io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, + swift_type_count); + io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); /* Close header */ H5Gclose(h_grp); /* Print the code version */ - writeCodeDescription(h_file); + io_write_code_description(h_file); /* Print the SPH parameters */ h_grp = H5Gcreate(h_file, "/HydroScheme", H5P_DEFAULT, H5P_DEFAULT, @@ -833,10 +837,10 @@ void write_output_serial(struct engine* e, const char* baseName, H5Gclose(h_grp); /* Print the system of Units used in the spashot */ - writeUnitSystem(h_file, snapshot_units, "Units"); + io_write_UnitSystem(h_file, snapshot_units, "Units"); /* Print the system of Units used internally */ - writeUnitSystem(h_file, internal_units, "InternalCodeUnits"); + io_write_UnitSystem(h_file, internal_units, "InternalCodeUnits"); /* Tell the user if a conversion will be needed */ if (e->verbose) { @@ -872,7 +876,7 @@ void write_output_serial(struct engine* e, const char* baseName, } /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N_total[ptype] == 0) continue; @@ -906,7 +910,7 @@ void write_output_serial(struct engine* e, const char* baseName, error("Error while opening file '%s' on rank %d.", fileName, mpi_rank); /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N_total[ptype] == 0) continue; @@ -914,8 +918,8 @@ void write_output_serial(struct engine* e, const char* baseName, /* Add the global information for that particle type to the XMF * meta-file */ if (mpi_rank == 0) - writeXMFgroupheader(xmfFile, fileName, N_total[ptype], - (enum PARTICLE_TYPE)ptype); + xmf_write_groupheader(xmfFile, fileName, N_total[ptype], + (enum part_type)ptype); /* Open the particle group in the file */ char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE]; @@ -933,12 +937,12 @@ void write_output_serial(struct engine* e, const char* baseName, /* Write particle fields from the particle structure */ switch (ptype) { - case GAS: + case swift_type_gas: Nparticles = Ngas; hydro_write_particles(parts, list, &num_fields); break; - case DM: + case swift_type_dark_matter: /* Allocate temporary array */ if (posix_memalign((void*)&dmparts, gpart_align, Ndm * sizeof(struct gpart)) != 0) @@ -946,14 +950,14 @@ void write_output_serial(struct engine* e, const char* baseName, bzero(dmparts, Ndm * sizeof(struct gpart)); /* Collect the DM particles from gpart */ - collect_dm_gparts(gparts, Ntot, dmparts, Ndm); + io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm); /* Write DM particles */ Nparticles = Ndm; darkmatter_write_particles(dmparts, list, &num_fields); break; - case STAR: + case swift_type_star: Nparticles = Nstars; star_write_particles(sparts, list, &num_fields); break; @@ -979,7 
+983,7 @@ void write_output_serial(struct engine* e, const char* baseName, /* Close this particle group in the XMF file as well */ if (mpi_rank == 0) - writeXMFgroupfooter(xmfFile, (enum PARTICLE_TYPE)ptype); + xmf_write_groupfooter(xmfFile, (enum part_type)ptype); } /* Close file */ @@ -991,7 +995,7 @@ void write_output_serial(struct engine* e, const char* baseName, } /* Write footer of LXMF file descriptor */ - if (mpi_rank == 0) writeXMFoutputfooter(xmfFile, outputCount, e->time); + if (mpi_rank == 0) xmf_write_outputfooter(xmfFile, outputCount, e->time); /* message("Done writing particles..."); */ ++outputCount; diff --git a/src/single_io.c b/src/single_io.c index f53ad90fdcf2f997d374dbf20a416c2aa21990ae..f8696e96a15078a83069368094cfca198d3bb32a 100644 --- a/src/single_io.c +++ b/src/single_io.c @@ -47,6 +47,7 @@ #include "part.h" #include "stars_io.h" #include "units.h" +#include "xmf.h" /*----------------------------------------------------------------------------- * Routines reading an IC file @@ -69,7 +70,7 @@ void readArray(hid_t h_grp, const struct io_props prop, size_t N, const struct UnitSystem* internal_units, const struct UnitSystem* ic_units) { - const size_t typeSize = sizeOfType(prop.type); + const size_t typeSize = io_sizeof_type(prop.type); const size_t copySize = typeSize * prop.dimension; const size_t num_elements = N * prop.dimension; @@ -104,7 +105,7 @@ void readArray(hid_t h_grp, const struct io_props prop, size_t N, /* Check data type */ const hid_t h_type = H5Dget_type(h_data); if (h_type < 0) error("Unable to retrieve data type from the file"); - // if (!H5Tequal(h_type, hdf5Type(type))) + // if (!H5Tequal(h_type, hdf5_type(type))) // error("Non-matching types between the code and the file"); /* Allocate temporary buffer */ @@ -114,8 +115,8 @@ void readArray(hid_t h_grp, const struct io_props prop, size_t N, /* Read HDF5 dataspace in temporary buffer */ /* Dirty version that happens to work for vectors but should be improved */ /* Using HDF5 dataspaces would be better */ - const hid_t h_err = - H5Dread(h_data, hdf5Type(prop.type), H5S_ALL, H5S_ALL, H5P_DEFAULT, temp); + const hid_t h_err = H5Dread(h_data, io_hdf5_type(prop.type), H5S_ALL, H5S_ALL, + H5P_DEFAULT, temp); if (h_err < 0) { error("Error while reading data array '%s'.", prop.name); } @@ -127,7 +128,7 @@ void readArray(hid_t h_grp, const struct io_props prop, size_t N, /* message("Converting ! factor=%e", factor); */ - if (isDoublePrecision(prop.type)) { + if (io_is_double_precision(prop.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -173,14 +174,14 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, const struct UnitSystem* internal_units, const struct UnitSystem* snapshot_units) { - const size_t typeSize = sizeOfType(props.type); + const size_t typeSize = io_sizeof_type(props.type); const size_t copySize = typeSize * props.dimension; const size_t num_elements = N * props.dimension; /* message("Writing '%s' array...", props.name); */ /* Allocate temporary buffer */ - void* temp = malloc(num_elements * sizeOfType(props.type)); + void* temp = malloc(num_elements * io_sizeof_type(props.type)); if (temp == NULL) error("Unable to allocate memory for temporary buffer"); /* Copy particle data to temporary buffer */ @@ -211,7 +212,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, /* message("Converting ! 
factor=%e", factor); */ - if (isDoublePrecision(props.type)) { + if (io_is_double_precision(props.type)) { double* temp_d = temp; for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor; } else { @@ -272,33 +273,34 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile, } /* Create dataset */ - const hid_t h_data = H5Dcreate(grp, props.name, hdf5Type(props.type), h_space, - H5P_DEFAULT, h_prop, H5P_DEFAULT); + const hid_t h_data = H5Dcreate(grp, props.name, io_hdf5_type(props.type), + h_space, H5P_DEFAULT, h_prop, H5P_DEFAULT); if (h_data < 0) { error("Error while creating dataspace '%s'.", props.name); } /* Write temporary buffer to HDF5 dataspace */ - h_err = H5Dwrite(h_data, hdf5Type(props.type), h_space, H5S_ALL, H5P_DEFAULT, - temp); + h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_space, H5S_ALL, + H5P_DEFAULT, temp); if (h_err < 0) { error("Error while writing data array '%s'.", props.name); } /* Write XMF description for this data set */ - writeXMFline(xmfFile, fileName, partTypeGroupName, props.name, N, - props.dimension, props.type); + xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N, + props.dimension, props.type); /* Write unit conversion factors for this data set */ char buffer[FIELD_BUFFER_SIZE]; units_cgs_conversion_string(buffer, snapshot_units, props.units); - writeAttribute_d(h_data, "CGS conversion factor", - units_cgs_conversion_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "h-scale exponent", - units_h_factor(snapshot_units, props.units)); - writeAttribute_f(h_data, "a-scale exponent", - units_a_factor(snapshot_units, props.units)); - writeAttribute_s(h_data, "Conversion factor", buffer); + io_write_attribute_d( + h_data, "CGS conversion factor", + units_cgs_conversion_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "h-scale exponent", + units_h_factor(snapshot_units, props.units)); + io_write_attribute_f(h_data, "a-scale exponent", + units_a_factor(snapshot_units, props.units)); + io_write_attribute_s(h_data, "Conversion factor", buffer); /* Free and close everything */ free(temp); @@ -346,11 +348,11 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* GADGET has only cubic boxes (in cosmological mode) */ double boxSize[3] = {0.0, -1.0, -1.0}; /* GADGET has 6 particle types. 
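The in-place unit conversion used by readArray()/writeArray() above rescales the temporary buffer element by element, branching on io_is_double_precision() to pick the right pointer type. A small sketch of that idiom; the helper name and the float branch are filled in for illustration and are not taken verbatim from the patch:

#include <stddef.h>
#include <stdio.h>

/* Rescale a raw I/O buffer of num_elements values by a unit-conversion
   factor, in place. Mirrors the double/float branch shown above; the
   function name itself is illustrative, not part of SWIFT. */
static void convert_units_in_place(void *temp, size_t num_elements,
                                   double factor, int is_double_precision) {
  if (is_double_precision) {
    double *temp_d = temp;
    for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
  } else {
    float *temp_f = temp;
    for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
  }
}

int main(void) {
  float pos[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  convert_units_in_place(pos, 6, 1000., /*is_double_precision=*/0);
  for (int i = 0; i < 6; ++i) printf("%g ", pos[i]);
  printf("\n");
  return 0;
}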
We only keep the type 0 & 1 for now...*/ - int numParticles[NUM_PARTICLE_TYPES] = {0}; - int numParticles_highWord[NUM_PARTICLE_TYPES] = {0}; - size_t N[NUM_PARTICLE_TYPES] = {0}; + int numParticles[swift_type_count] = {0}; + int numParticles_highWord[swift_type_count] = {0}; + size_t N[swift_type_count] = {0}; int dimension = 3; /* Assume 3D if nothing is specified */ - size_t Ndm; + size_t Ndm = 0; /* Open file */ /* message("Opening file '%s' as IC.", fileName); */ @@ -365,7 +367,7 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, if (h_grp < 0) error("Error while opening runtime parameters\n"); /* Read the relevant information */ - readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic); + io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic); /* Close runtime parameters */ H5Gclose(h_grp); @@ -379,20 +381,21 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, const hid_t hid_dim = H5Aexists(h_grp, "Dimension"); if (hid_dim < 0) error("Error while testing existance of 'Dimension' attribute"); - if (hid_dim > 0) readAttribute(h_grp, "Dimension", INT, &dimension); + if (hid_dim > 0) io_read_attribute(h_grp, "Dimension", INT, &dimension); if (dimension != hydro_dimension) error("ICs dimensionality (%dD) does not match code dimensionality (%dD)", dimension, (int)hydro_dimension); /* Read the relevant information and print status */ int flag_entropy_temp[6]; - readAttribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); + io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp); *flag_entropy = flag_entropy_temp[0]; - readAttribute(h_grp, "BoxSize", DOUBLE, boxSize); - readAttribute(h_grp, "NumPart_Total", UINT, numParticles); - readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord); + io_read_attribute(h_grp, "BoxSize", DOUBLE, boxSize); + io_read_attribute(h_grp, "NumPart_Total", UINT, numParticles); + io_read_attribute(h_grp, "NumPart_Total_HighWord", UINT, + numParticles_highWord); - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) + for (int ptype = 0; ptype < swift_type_count; ++ptype) N[ptype] = ((long long)numParticles[ptype]) + ((long long)numParticles_highWord[ptype] << 32); @@ -409,7 +412,7 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* Read the unit system used in the ICs */ struct UnitSystem* ic_units = malloc(sizeof(struct UnitSystem)); if (ic_units == NULL) error("Unable to allocate memory for IC unit system"); - readUnitSystem(h_file, ic_units); + io_read_UnitSystem(h_file, ic_units); /* Tell the user if a conversion will be needed */ if (units_are_equal(ic_units, internal_units)) { @@ -447,7 +450,7 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store SPH particles */ if (with_hydro) { - *Ngas = N[GAS]; + *Ngas = N[swift_type_gas]; if (posix_memalign((void*)parts, part_align, *Ngas * sizeof(struct part)) != 0) error("Error while allocating memory for SPH particles"); @@ -456,7 +459,7 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store star particles */ if (with_stars) { - *Nstars = N[STAR]; + *Nstars = N[swift_type_star]; if (posix_memalign((void*)sparts, spart_align, *Nstars * sizeof(struct spart)) != 0) error("Error while allocating memory for star particles"); @@ -465,8 +468,10 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* Allocate memory to store all gravity particles */ if 
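The GADGET-style headers handled above store each 64-bit particle count as two 32-bit words, NumPart_Total and NumPart_Total_HighWord: the writers split with a right shift and the readers recombine with a left shift, exactly as in the hunks above. A tiny round-trip sketch of that encoding (the example count is made up):

#include <assert.h>
#include <stdio.h>

int main(void) {
  /* An example count too large for 32 bits (a 2048^3 particle run). */
  const long long N_total = 2048LL * 2048LL * 2048LL;

  /* Write side: split into the two header words. */
  const unsigned int numParticles = (unsigned int)N_total;
  const unsigned int numParticles_highWord = (unsigned int)(N_total >> 32);

  /* Read side: recombine, as in the read_ic_* routines above. */
  const long long recombined =
      (long long)numParticles + ((long long)numParticles_highWord << 32);

  printf("N_total=%lld low=%u high=%u recombined=%lld\n", N_total,
         numParticles, numParticles_highWord, recombined);
  assert(recombined == N_total);
  return 0;
}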
(with_gravity) { - Ndm = N[DM]; - *Ngparts = (with_hydro ? N[GAS] : 0) + N[DM] + (with_stars ? N[STAR] : 0); + Ndm = N[swift_type_dark_matter]; + *Ngparts = (with_hydro ? N[swift_type_gas] : 0) + + N[swift_type_dark_matter] + + (with_stars ? N[swift_type_star] : 0); if (posix_memalign((void*)gparts, gpart_align, *Ngparts * sizeof(struct gpart)) != 0) error("Error while allocating memory for gravity particles"); @@ -480,7 +485,7 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* message("NumPart = [%zd, %zd] Total = %zd", *Ngas, Ndm, *Ngparts); */ /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (N[ptype] == 0) continue; @@ -501,21 +506,21 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, /* Read particle fields into the structure */ switch (ptype) { - case GAS: + case swift_type_gas: if (with_hydro) { Nparticles = *Ngas; hydro_read_particles(*parts, list, &num_fields); } break; - case DM: + case swift_type_dark_matter: if (with_gravity) { Nparticles = Ndm; darkmatter_read_particles(*gparts, list, &num_fields); } break; - case STAR: + case swift_type_star: if (with_stars) { Nparticles = *Nstars; star_read_particles(*sparts, list, &num_fields); @@ -536,15 +541,15 @@ void read_ic_single(char* fileName, const struct UnitSystem* internal_units, } /* Prepare the DM particles */ - if (!dry_run && with_gravity) prepare_dm_gparts(*gparts, Ndm); + if (!dry_run && with_gravity) io_prepare_dm_gparts(*gparts, Ndm); /* Duplicate the hydro particles into gparts */ if (!dry_run && with_gravity && with_hydro) - duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); + io_duplicate_hydro_gparts(*parts, *gparts, *Ngas, Ndm); /* Duplicate the star particles into gparts */ if (!dry_run && with_gravity && with_stars) - duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); + io_duplicate_star_gparts(*sparts, *gparts, *Nstars, Ndm + *Ngas); /* message("Done Reading particles..."); */ @@ -590,7 +595,7 @@ void write_output_single(struct engine* e, const char* baseName, /* Number of unassociated gparts */ const size_t Ndm = Ntot > 0 ? 
Ntot - (Ngas + Nstars) : 0; - long long N_total[NUM_PARTICLE_TYPES] = {Ngas, Ndm, 0, 0, Nstars, 0}; + long long N_total[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0}; /* File name */ char fileName[FILENAME_BUFFER_SIZE]; @@ -598,14 +603,14 @@ void write_output_single(struct engine* e, const char* baseName, outputCount); /* First time, we need to create the XMF file */ - if (outputCount == 0) createXMFfile(baseName); + if (outputCount == 0) xmf_create_file(baseName); /* Prepare the XMF file for the new entry */ FILE* xmfFile = 0; - xmfFile = prepareXMFfile(baseName); + xmfFile = xmf_prepare_file(baseName); /* Write the part corresponding to this specific output */ - writeXMFoutputheader(xmfFile, fileName, e->time); + xmf_write_outputheader(xmfFile, fileName, e->time); /* Open file */ /* message("Opening file '%s'.", fileName); */ @@ -621,7 +626,7 @@ void write_output_single(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating runtime parameters group\n"); /* Write the relevant information */ - writeAttribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); + io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1); /* Close runtime parameters */ H5Gclose(h_grp); @@ -632,39 +637,39 @@ void write_output_single(struct engine* e, const char* baseName, if (h_grp < 0) error("Error while creating file header\n"); /* Print the relevant information and print status */ - writeAttribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); + io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3); double dblTime = e->time; - writeAttribute(h_grp, "Time", DOUBLE, &dblTime, 1); + io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1); int dimension = (int)hydro_dimension; - writeAttribute(h_grp, "Dimension", INT, &dimension, 1); + io_write_attribute(h_grp, "Dimension", INT, &dimension, 1); /* GADGET-2 legacy values */ /* Number of particles of each type */ - unsigned int numParticles[NUM_PARTICLE_TYPES] = {0}; - unsigned int numParticlesHighWord[NUM_PARTICLE_TYPES] = {0}; - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ++ptype) { + unsigned int numParticles[swift_type_count] = {0}; + unsigned int numParticlesHighWord[swift_type_count] = {0}; + for (int ptype = 0; ptype < swift_type_count; ++ptype) { numParticles[ptype] = (unsigned int)N_total[ptype]; numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32); } - writeAttribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total", UINT, numParticles, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticlesHighWord, - NUM_PARTICLE_TYPES); - double MassTable[NUM_PARTICLE_TYPES] = {0}; - writeAttribute(h_grp, "MassTable", DOUBLE, MassTable, NUM_PARTICLE_TYPES); - unsigned int flagEntropy[NUM_PARTICLE_TYPES] = {0}; + io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles, + swift_type_count); + io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT, + numParticlesHighWord, swift_type_count); + double MassTable[swift_type_count] = {0}; + io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count); + unsigned int flagEntropy[swift_type_count] = {0}; flagEntropy[0] = writeEntropyFlag(); - writeAttribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, - NUM_PARTICLE_TYPES); - writeAttribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); + io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, + 
swift_type_count); + io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1); /* Close header */ H5Gclose(h_grp); /* Print the code version */ - writeCodeDescription(h_file); + io_write_code_description(h_file); /* Print the SPH parameters */ h_grp = @@ -682,10 +687,10 @@ void write_output_single(struct engine* e, const char* baseName, H5Gclose(h_grp); /* Print the system of Units used in the spashot */ - writeUnitSystem(h_file, snapshot_units, "Units"); + io_write_UnitSystem(h_file, snapshot_units, "Units"); /* Print the system of Units used internally */ - writeUnitSystem(h_file, internal_units, "InternalCodeUnits"); + io_write_UnitSystem(h_file, internal_units, "InternalCodeUnits"); /* Tell the user if a conversion will be needed */ if (e->verbose) { @@ -721,14 +726,14 @@ void write_output_single(struct engine* e, const char* baseName, } /* Loop over all particle types */ - for (int ptype = 0; ptype < NUM_PARTICLE_TYPES; ptype++) { + for (int ptype = 0; ptype < swift_type_count; ptype++) { /* Don't do anything if no particle of this kind */ if (numParticles[ptype] == 0) continue; /* Add the global information for that particle type to the XMF meta-file */ - writeXMFgroupheader(xmfFile, fileName, numParticles[ptype], - (enum PARTICLE_TYPE)ptype); + xmf_write_groupheader(xmfFile, fileName, numParticles[ptype], + (enum part_type)ptype); /* Open the particle group in the file */ char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE]; @@ -747,12 +752,12 @@ void write_output_single(struct engine* e, const char* baseName, /* Write particle fields from the particle structure */ switch (ptype) { - case GAS: + case swift_type_gas: N = Ngas; hydro_write_particles(parts, list, &num_fields); break; - case DM: + case swift_type_dark_matter: /* Allocate temporary array */ if (posix_memalign((void*)&dmparts, gpart_align, Ndm * sizeof(struct gpart)) != 0) @@ -760,14 +765,14 @@ void write_output_single(struct engine* e, const char* baseName, bzero(dmparts, Ndm * sizeof(struct gpart)); /* Collect the DM particles from gpart */ - collect_dm_gparts(gparts, Ntot, dmparts, Ndm); + io_collect_dm_gparts(gparts, Ntot, dmparts, Ndm); /* Write DM particles */ N = Ndm; darkmatter_write_particles(dmparts, list, &num_fields); break; - case STAR: + case swift_type_star: N = Nstars; star_write_particles(sparts, list, &num_fields); break; @@ -791,11 +796,11 @@ void write_output_single(struct engine* e, const char* baseName, H5Gclose(h_grp); /* Close this particle group in the XMF file as well */ - writeXMFgroupfooter(xmfFile, (enum PARTICLE_TYPE)ptype); + xmf_write_groupfooter(xmfFile, (enum part_type)ptype); } /* Write LXMF file descriptor */ - writeXMFoutputfooter(xmfFile, outputCount, e->time); + xmf_write_outputfooter(xmfFile, outputCount, e->time); /* message("Done writing particles..."); */ diff --git a/src/space.c b/src/space.c index db3e7a1c24c6134f2bc79475b3cfeaee13941dac..5deaa308af842436b370c78ad773062550ff242e 100644 --- a/src/space.c +++ b/src/space.c @@ -2376,6 +2376,7 @@ void space_init_sparts(struct space *s) { * @param Ngpart The number of Gravity particles in the space. * @param Nspart The number of star particles in the space. * @param periodic flag whether the domain is periodic or not. + * @param replicate How many replications along each direction do we want ? * @param gravity flag whether we are doing gravity or not. * @param verbose Print messages to stdout or not. * @param dry_run If 1, just initialise stuff, don't do anything with the parts. 
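The snapshot header written above keeps the GADGET-2 convention of storing each 64-bit particle count as two 32-bit words ("NumPart_Total" and "NumPart_Total_HighWord"), which read_ic_single() recombines with a 32-bit shift. The following is a minimal, self-contained sketch of that split and recombination; it is illustrative plain C, not part of the patch, and the variable names are invented:

#include <assert.h>
#include <stdio.h>

int main(void) {
  /* A particle count larger than 2^32, held as a 64-bit value. */
  const long long n_total = 5000000000LL;

  /* Split into the two 32-bit header words, as done for
     "NumPart_Total" and "NumPart_Total_HighWord" when writing. */
  const unsigned int low = (unsigned int)n_total;
  const unsigned int high = (unsigned int)(n_total >> 32);

  /* Recombine on the reading side, as done in read_ic_single(). */
  const long long n_read = (long long)low + ((long long)high << 32);

  assert(n_read == n_total);
  printf("low=%u high=%u recombined=%lld\n", low, high, n_read);
  return 0;
}

Storing the counts this way keeps the header compatible with tools expecting 32-bit GADGET fields while still allowing more than 2^32 particles per type.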
@@ -2388,8 +2389,8 @@ void space_init_sparts(struct space *s) { void space_init(struct space *s, const struct swift_params *params, double dim[3], struct part *parts, struct gpart *gparts, struct spart *sparts, size_t Npart, size_t Ngpart, - size_t Nspart, int periodic, int gravity, int verbose, - int dry_run) { + size_t Nspart, int periodic, int replicate, int gravity, + int verbose, int dry_run) { /* Clean-up everything */ bzero(s, sizeof(struct space)); @@ -2412,15 +2413,29 @@ void space_init(struct space *s, const struct swift_params *params, s->sparts = sparts; s->nr_queues = 1; /* Temporary value until engine construction */ + /* Are we replicating the space ? */ + if (replicate < 1) + error("Value of 'InitialConditions:replicate' (%d) is too small", + replicate); + if (replicate > 1) { + space_replicate(s, replicate, verbose); + parts = s->parts; + gparts = s->gparts; + sparts = s->sparts; + Npart = s->nr_parts; + Ngpart = s->nr_gparts; + Nspart = s->nr_sparts; + } + /* Decide on the minimal top-level cell size */ - const double dmax = max(max(dim[0], dim[1]), dim[2]); + const double dmax = max(max(s->dim[0], s->dim[1]), s->dim[2]); int maxtcells = parser_get_opt_param_int(params, "Scheduler:max_top_level_cells", space_max_top_level_cells_default); s->cell_min = 0.99 * dmax / maxtcells; /* Check that it is big enough. */ - const double dmin = min(min(dim[0], dim[1]), dim[2]); + const double dmin = min(min(s->dim[0], s->dim[1]), s->dim[2]); int needtcells = 3 * dmax / dmin; if (maxtcells < needtcells) error( @@ -2482,13 +2497,13 @@ void space_init(struct space *s, const struct swift_params *params, if (periodic) { for (size_t k = 0; k < Npart; k++) for (int j = 0; j < 3; j++) { - while (parts[k].x[j] < 0) parts[k].x[j] += dim[j]; - while (parts[k].x[j] >= dim[j]) parts[k].x[j] -= dim[j]; + while (parts[k].x[j] < 0) parts[k].x[j] += s->dim[j]; + while (parts[k].x[j] >= s->dim[j]) parts[k].x[j] -= s->dim[j]; } } else { for (size_t k = 0; k < Npart; k++) for (int j = 0; j < 3; j++) - if (parts[k].x[j] < 0 || parts[k].x[j] >= dim[j]) + if (parts[k].x[j] < 0 || parts[k].x[j] >= s->dim[j]) error("Not all particles are within the specified domain."); } @@ -2496,13 +2511,13 @@ void space_init(struct space *s, const struct swift_params *params, if (periodic) { for (size_t k = 0; k < Ngpart; k++) for (int j = 0; j < 3; j++) { - while (gparts[k].x[j] < 0) gparts[k].x[j] += dim[j]; - while (gparts[k].x[j] >= dim[j]) gparts[k].x[j] -= dim[j]; + while (gparts[k].x[j] < 0) gparts[k].x[j] += s->dim[j]; + while (gparts[k].x[j] >= s->dim[j]) gparts[k].x[j] -= s->dim[j]; } } else { for (size_t k = 0; k < Ngpart; k++) for (int j = 0; j < 3; j++) - if (gparts[k].x[j] < 0 || gparts[k].x[j] >= dim[j]) + if (gparts[k].x[j] < 0 || gparts[k].x[j] >= s->dim[j]) error("Not all g-particles are within the specified domain."); } @@ -2510,13 +2525,13 @@ void space_init(struct space *s, const struct swift_params *params, if (periodic) { for (size_t k = 0; k < Nspart; k++) for (int j = 0; j < 3; j++) { - while (sparts[k].x[j] < 0) sparts[k].x[j] += dim[j]; - while (sparts[k].x[j] >= dim[j]) sparts[k].x[j] -= dim[j]; + while (sparts[k].x[j] < 0) sparts[k].x[j] += s->dim[j]; + while (sparts[k].x[j] >= s->dim[j]) sparts[k].x[j] -= s->dim[j]; } } else { for (size_t k = 0; k < Nspart; k++) for (int j = 0; j < 3; j++) - if (sparts[k].x[j] < 0 || sparts[k].x[j] >= dim[j]) + if (sparts[k].x[j] < 0 || sparts[k].x[j] >= s->dim[j]) error("Not all s-particles are within the specified domain."); } } @@ -2542,6 +2557,127 @@ void 
space_init(struct space *s, const struct swift_params *params, if (!dry_run) space_regrid(s, verbose); } +/** + * @brief Replicate the content of a space along each axis. + * + * Should only be called during initialisation. + * + * @param s The #space to replicate. + * @param replicate The number of copies along each axis. + * @param verbose Are we talkative ? + */ +void space_replicate(struct space *s, int replicate, int verbose) { + + if (replicate < 1) error("Invalid replicate value: %d", replicate); + + message("Replicating space %d times along each axis.", replicate); + + const int factor = replicate * replicate * replicate; + + /* Store the current values */ + const size_t nr_parts = s->nr_parts; + const size_t nr_gparts = s->nr_gparts; + const size_t nr_sparts = s->nr_sparts; + const size_t nr_dm = nr_gparts - nr_parts - nr_sparts; + + s->size_parts = s->nr_parts = nr_parts * factor; + s->size_gparts = s->nr_gparts = nr_gparts * factor; + s->size_sparts = s->nr_sparts = nr_sparts * factor; + + /* Allocate space for new particles */ + struct part *parts = NULL; + struct gpart *gparts = NULL; + struct spart *sparts = NULL; + + if (posix_memalign((void *)&parts, part_align, + s->nr_parts * sizeof(struct part)) != 0) + error("Failed to allocate new part array."); + + if (posix_memalign((void *)&gparts, gpart_align, + s->nr_gparts * sizeof(struct gpart)) != 0) + error("Failed to allocate new gpart array."); + + if (posix_memalign((void *)&sparts, spart_align, + s->nr_sparts * sizeof(struct spart)) != 0) + error("Failed to allocate new spart array."); + + /* Replicate everything */ + for (int i = 0; i < replicate; ++i) { + for (int j = 0; j < replicate; ++j) { + for (int k = 0; k < replicate; ++k) { + const size_t offset = i * replicate * replicate + j * replicate + k; + + /* First copy the data */ + memcpy(parts + offset * nr_parts, s->parts, + nr_parts * sizeof(struct part)); + memcpy(sparts + offset * nr_sparts, s->sparts, + nr_sparts * sizeof(struct spart)); + memcpy(gparts + offset * nr_gparts, s->gparts, + nr_gparts * sizeof(struct gpart)); + + /* Shift the positions */ + const double shift[3] = {i * s->dim[0], j * s->dim[1], k * s->dim[2]}; + + for (size_t n = offset * nr_parts; n < (offset + 1) * nr_parts; ++n) { + parts[n].x[0] += shift[0]; + parts[n].x[1] += shift[1]; + parts[n].x[2] += shift[2]; + } + for (size_t n = offset * nr_gparts; n < (offset + 1) * nr_gparts; ++n) { + gparts[n].x[0] += shift[0]; + gparts[n].x[1] += shift[1]; + gparts[n].x[2] += shift[2]; + } + for (size_t n = offset * nr_sparts; n < (offset + 1) * nr_sparts; ++n) { + sparts[n].x[0] += shift[0]; + sparts[n].x[1] += shift[1]; + sparts[n].x[2] += shift[2]; + } + + /* Set the correct links (recall gpart are sorted by type at start-up): + first DM (unassociated gpart), then gas, then stars */ + if (nr_parts > 0 && nr_gparts > 0) { + const size_t offset_part = offset * nr_parts; + const size_t offset_gpart = offset * nr_gparts + nr_dm; + + for (size_t n = 0; n < nr_parts; ++n) { + parts[offset_part + n].gpart = &gparts[offset_gpart + n]; + gparts[offset_gpart + n].id_or_neg_offset = -(offset_part + n); + } + } + if (nr_sparts > 0 && nr_gparts > 0) { + const size_t offset_spart = offset * nr_sparts; + const size_t offset_gpart = offset * nr_gparts + nr_dm + nr_parts; + + for (size_t n = 0; n < nr_sparts; ++n) { + sparts[offset_spart + n].gpart = &gparts[offset_gpart + n]; + gparts[offset_gpart + n].id_or_neg_offset = -(offset_spart + n); + } + } + } + } + } + + /* Replace the content of the space */ + 
+  free(s->parts);
+  free(s->gparts);
+  free(s->sparts);
+  s->parts = parts;
+  s->gparts = gparts;
+  s->sparts = sparts;
+
+  /* Finally, update the domain size */
+  s->dim[0] *= replicate;
+  s->dim[1] *= replicate;
+  s->dim[2] *= replicate;
+
+#ifdef SWIFT_DEBUG_CHECKS
+  /* Verify that everything is correct */
+  part_verify_links(s->parts, s->gparts, s->sparts, s->nr_parts, s->nr_gparts,
+                    s->nr_sparts, verbose);
+#endif
+}
+
 /**
  * @brief Cleans-up all the cell links in the space
  *
diff --git a/src/space.h b/src/space.h
index 3d525b62455f669295798a395f4d46281c919da6..7f53fbca232e72dd2d5a76fb4f23b0f815d5e421 100644
--- a/src/space.h
+++ b/src/space.h
@@ -165,8 +165,8 @@ int space_getsid(struct space *s, struct cell **ci, struct cell **cj,
 void space_init(struct space *s, const struct swift_params *params,
                 double dim[3], struct part *parts, struct gpart *gparts,
                 struct spart *sparts, size_t Npart, size_t Ngpart,
-                size_t Nspart, int periodic, int gravity, int verbose,
-                int dry_run);
+                size_t Nspart, int periodic, int replicate, int gravity,
+                int verbose, int dry_run);
 void space_sanitize(struct space *s);
 void space_map_cells_pre(struct space *s, int full,
                          void (*fun)(struct cell *c, void *data), void *data);
@@ -206,6 +206,7 @@ void space_init_sparts(struct space *s);
 void space_link_cleanup(struct space *s);
 void space_check_drift_point(struct space *s, integertime_t ti_drift);
 void space_check_timesteps(struct space *s);
+void space_replicate(struct space *s, int replicate, int verbose);
 void space_clean(struct space *s);
 
 #endif /* SWIFT_SPACE_H */
diff --git a/src/version.c b/src/version.c
index c5b9255f6cab1fc716a035d3ef739b969a3ab4d4..54a416f6b0745a523382f338fa838018e5254b1e 100644
--- a/src/version.c
+++ b/src/version.c
@@ -111,6 +111,27 @@ const char *git_branch(void) {
   return buf;
 }
 
+/**
+ * @brief Return the date of the commit in the git repository
+ *
+ * The date of the commit of the code we are running.
+ *
+ * @result The date of the last git commit.
+ */
+const char *git_date(void) {
+  static char buf[256];
+  static int initialised = 0;
+  static const char *date = GIT_DATE;
+  if (!initialised) {
+    if (strlen(date) == 0)
+      sprintf(buf, "%s", "unknown");
+    else
+      sprintf(buf, "%s", date);
+    initialised = 1;
+  }
+  return buf;
+}
+
 /**
  * @brief Return the options passed to the 'configure' script
  *
@@ -332,7 +353,8 @@ void greetings(void) {
   printf(" SPH With Inter-dependent Fine-grained Tasking\n\n");
 
   printf(" Version : %s\n", package_version());
-  printf(" Revision: %s, Branch: %s\n", git_revision(), git_branch());
+  printf(" Revision: %s, Branch: %s, Date: %s\n", git_revision(), git_branch(),
+         git_date());
   printf(" Webpage : %s\n\n", PACKAGE_URL);
   printf(" Config.
options: %s\n\n", configuration_options()); printf(" Compiler: %s, Version: %s\n", compiler_name(), compiler_version()); diff --git a/src/version.h b/src/version.h index 5fa057ec8675e9310364eaec3f5098404976e895..60998958098c1a37198cdbb3729982835f6e4f62 100644 --- a/src/version.h +++ b/src/version.h @@ -25,6 +25,7 @@ const char* package_version(void); const char* hostname(void); const char* git_revision(void); const char* git_branch(void); +const char* git_date(void); const char* configuration_options(void); const char* compilation_cflags(void); const char* compiler_name(void); diff --git a/src/version_string.h.in b/src/version_string.h.in index 2be9a84fd52bfe089917d4c0da874fa7ef2dce6b..cea9f189ea4c68a483b7715cc43acd9a8cc26037 100644 --- a/src/version_string.h.in +++ b/src/version_string.h.in @@ -28,6 +28,7 @@ #define PACKAGE_VERSION "@PACKAGE_VERSION@" #define GIT_REVISION "@GIT_REVISION@" #define GIT_BRANCH "@GIT_BRANCH@" +#define GIT_DATE "@GIT_DATE@" #define SWIFT_CFLAGS "@SWIFT_CFLAGS@" #endif /* SWIFT_VERSION_STRING_H */ diff --git a/src/xmf.c b/src/xmf.c new file mode 100644 index 0000000000000000000000000000000000000000..7292606c9f013601db1e9e9b35ee843dea63f785 --- /dev/null +++ b/src/xmf.c @@ -0,0 +1,215 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk). + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + ******************************************************************************/ + +/* Config parameters. */ +#include "../config.h" + +/* Some standard headers. */ +#include <stdio.h> + +/* This object's header. */ +#include "xmf.h" + +/* Local headers. */ +#include "common_io.h" +#include "error.h" + +/** + * @brief Prepare the XMF file corresponding to a snapshot. + * + * @param baseName The common part of the file name. 
+ */ +FILE* xmf_prepare_file(const char* baseName) { + char buffer[1024]; + + char fileName[FILENAME_BUFFER_SIZE]; + char tempFileName[FILENAME_BUFFER_SIZE]; + snprintf(fileName, FILENAME_BUFFER_SIZE, "%s.xmf", baseName); + snprintf(tempFileName, FILENAME_BUFFER_SIZE, "%s_temp.xmf", baseName); + FILE* xmfFile = fopen(fileName, "r"); + FILE* tempFile = fopen(tempFileName, "w"); + + if (xmfFile == NULL) error("Unable to open current XMF file."); + + if (tempFile == NULL) error("Unable to open temporary file."); + + /* First we make a temporary copy of the XMF file and count the lines */ + int counter = 0; + while (fgets(buffer, 1024, xmfFile) != NULL) { + counter++; + fprintf(tempFile, "%s", buffer); + } + fclose(tempFile); + fclose(xmfFile); + + /* We then copy the XMF file back up to the closing lines */ + xmfFile = fopen(fileName, "w"); + tempFile = fopen(tempFileName, "r"); + + if (xmfFile == NULL) error("Unable to open current XMF file."); + + if (tempFile == NULL) error("Unable to open temporary file."); + + int i = 0; + while (fgets(buffer, 1024, tempFile) != NULL && i < counter - 3) { + i++; + fprintf(xmfFile, "%s", buffer); + } + fprintf(xmfFile, "\n"); + fclose(tempFile); + remove(tempFileName); + + return xmfFile; +} + +/** + * @brief Writes the begin of the XMF file + * + * @todo Exploit the XML nature of the XMF format to write a proper XML writer + * and simplify all the XMF-related stuff. + */ +void xmf_create_file(const char* baseName) { + + char fileName[FILENAME_BUFFER_SIZE]; + snprintf(fileName, FILENAME_BUFFER_SIZE, "%s.xmf", baseName); + FILE* xmfFile = fopen(fileName, "w"); + + fprintf(xmfFile, "<?xml version=\"1.0\" ?> \n"); + fprintf(xmfFile, "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []> \n"); + fprintf( + xmfFile, + "<Xdmf xmlns:xi=\"http://www.w3.org/2003/XInclude\" Version=\"2.1\">\n"); + fprintf(xmfFile, "<Domain>\n"); + fprintf(xmfFile, + "<Grid Name=\"TimeSeries\" GridType=\"Collection\" " + "CollectionType=\"Temporal\">\n\n"); + + fprintf(xmfFile, "</Grid>\n"); + fprintf(xmfFile, "</Domain>\n"); + fprintf(xmfFile, "</Xdmf>\n"); + + fclose(xmfFile); +} + +/** + * @brief Writes the part of the XMF entry presenting the geometry of the + * snapshot + * + * @param xmfFile The file to write in. + * @param hdfFileName The name of the HDF5 file corresponding to this output. + * @param time The current simulation time. + */ +void xmf_write_outputheader(FILE* xmfFile, char* hdfFileName, float time) { + /* Write end of file */ + + fprintf(xmfFile, "<!-- XMF description for file: %s -->\n", hdfFileName); + fprintf(xmfFile, + "<Grid GridType=\"Collection\" CollectionType=\"Spatial\">\n"); + fprintf(xmfFile, "<Time Type=\"Single\" Value=\"%f\"/>\n", time); +} + +/** + * @brief Writes the end of the XMF file (closes all open markups) + * + * @param xmfFile The file to write in. + * @param output The number of this output. + * @param time The current simulation time. + */ +void xmf_write_outputfooter(FILE* xmfFile, int output, float time) { + /* Write end of the section of this time step */ + + fprintf(xmfFile, + "\n</Grid> <!-- End of meta-data for output=%03i, time=%f -->\n", + output, time); + fprintf(xmfFile, "\n</Grid> <!-- timeSeries -->\n"); + fprintf(xmfFile, "</Domain>\n"); + fprintf(xmfFile, "</Xdmf>\n"); + + fclose(xmfFile); +} + +/** + * @brief Writes the header of an XMF group for a given particle type. + * + * @param xmfFile The file to write to. + * @param hdfFileName The name of the corresponding HDF5 file. + * @param N The number of particles to write. 
+ * @param ptype The particle type we are writing. + */ +void xmf_write_groupheader(FILE* xmfFile, char* hdfFileName, size_t N, + enum part_type ptype) { + fprintf(xmfFile, "\n<Grid Name=\"%s\" GridType=\"Uniform\">\n", + part_type_names[ptype]); + fprintf(xmfFile, + "<Topology TopologyType=\"Polyvertex\" Dimensions=\"%zu\"/>\n", N); + fprintf(xmfFile, "<Geometry GeometryType=\"XYZ\">\n"); + fprintf(xmfFile, + "<DataItem Dimensions=\"%zu 3\" NumberType=\"Double\" " + "Precision=\"8\" " + "Format=\"HDF\">%s:/PartType%d/Coordinates</DataItem>\n", + N, hdfFileName, (int)ptype); + fprintf(xmfFile, + "</Geometry>\n <!-- Done geometry for %s, start of particle fields " + "list -->\n", + part_type_names[ptype]); +} + +/** + * @brief Writes the footer of an XMF group for a given particle type. + * + * @param xmfFile The file to write to. + * @param ptype The particle type we are writing. + */ +void xmf_write_groupfooter(FILE* xmfFile, enum part_type ptype) { + fprintf(xmfFile, "</Grid> <!-- End of meta-data for parttype=%s -->\n", + part_type_names[ptype]); +} + +/** + * @brief Writes the lines corresponding to an array of the HDF5 output + * + * @param xmfFile The file in which to write + * @param fileName The name of the HDF5 file associated to this XMF descriptor. + * @param partTypeGroupName The name of the group containing the particles in + * the HDF5 file. + * @param name The name of the array in the HDF5 file. + * @param N The number of particles. + * @param dim The dimension of the quantity (1 for scalars, 3 for vectors). + * @param type The type of the data to write. + * + * @todo Treat the types in a better way. + */ +void xmf_write_line(FILE* xmfFile, const char* fileName, + const char* partTypeGroupName, const char* name, size_t N, + int dim, enum IO_DATA_TYPE type) { + fprintf(xmfFile, + "<Attribute Name=\"%s\" AttributeType=\"%s\" Center=\"Node\">\n", + name, dim == 1 ? "Scalar" : "Vector"); + if (dim == 1) + fprintf(xmfFile, + "<DataItem Dimensions=\"%zu\" NumberType=\"Double\" " + "Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n", + N, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name); + else + fprintf(xmfFile, + "<DataItem Dimensions=\"%zu %d\" NumberType=\"Double\" " + "Precision=\"%d\" Format=\"HDF\">%s:%s/%s</DataItem>\n", + N, dim, type == FLOAT ? 4 : 8, fileName, partTypeGroupName, name); + fprintf(xmfFile, "</Attribute>\n"); +} diff --git a/src/xmf.h b/src/xmf.h new file mode 100644 index 0000000000000000000000000000000000000000..bd2781685f8d1f96daf6e5bfeb45a2bf645fca6e --- /dev/null +++ b/src/xmf.h @@ -0,0 +1,40 @@ +/******************************************************************************* + * This file is part of SWIFT. + * Copyright (c) 2017 Matthieu Schaller (matthieu.schaller@durham.ac.uk). + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ *
+ ******************************************************************************/
+#ifndef SWIFT_XMF_H
+#define SWIFT_XMF_H
+
+/* Config parameters. */
+#include "../config.h"
+
+/* Local headers. */
+#include "common_io.h"
+#include "part_type.h"
+
+void xmf_create_file(const char* baseName);
+FILE* xmf_prepare_file(const char* baseName);
+void xmf_write_outputheader(FILE* xmfFile, char* hdfFileName, float time);
+void xmf_write_outputfooter(FILE* xmfFile, int outputCount, float time);
+void xmf_write_groupheader(FILE* xmfFile, char* hdfFileName, size_t N,
+                           enum part_type ptype);
+void xmf_write_groupfooter(FILE* xmfFile, enum part_type ptype);
+void xmf_write_line(FILE* xmfFile, const char* fileName,
+                    const char* partTypeGroupName, const char* name, size_t N,
+                    int dim, enum IO_DATA_TYPE type);
+
+#endif /* SWIFT_XMF_H */
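For reference, the new xmf.c/xmf.h API is driven by write_output_single() in the order sketched below. This is an illustrative sketch only: the driver function, the file-name pattern and the field names are invented for the example, and error handling is omitted.

#include <stdio.h>

#include "xmf.h"

/* Hypothetical driver showing the call order used by write_output_single(). */
void write_xmf_entry_example(int outputCount, float time, size_t Ngas) {

  const char* baseName = "output";
  char hdfFileName[256];
  snprintf(hdfFileName, sizeof(hdfFileName), "%s_%04d.hdf5", baseName,
           outputCount);

  /* The wrapper .xmf file is created once, before the first snapshot. */
  if (outputCount == 0) xmf_create_file(baseName);

  /* Re-open the wrapper and start the entry for this output. */
  FILE* xmfFile = xmf_prepare_file(baseName);
  xmf_write_outputheader(xmfFile, hdfFileName, time);

  /* One group per non-empty particle type, one line per particle array. */
  xmf_write_groupheader(xmfFile, hdfFileName, Ngas, swift_type_gas);
  xmf_write_line(xmfFile, hdfFileName, "/PartType0", "Masses", Ngas, 1, FLOAT);
  xmf_write_line(xmfFile, hdfFileName, "/PartType0", "Velocities", Ngas, 3,
                 FLOAT);
  xmf_write_groupfooter(xmfFile, swift_type_gas);

  /* Close the entry; xmf_write_outputfooter() also closes the file. */
  xmf_write_outputfooter(xmfFile, outputCount, time);
}

Because xmf_write_outputfooter() re-writes the closing Grid/Domain/Xdmf markup and closes the file, the wrapper is left as a complete XDMF document after every snapshot; xmf_prepare_file() strips those closing lines again before the next entry is appended.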