Commit d8fbdc2d authored by Pedro Gonnet

Merge branch 'serial_io' of gitlab.cosma.dur.ac.uk:swift/swiftsim into mpi_fixes

Doing this to get the examples to work correctly.


Former-commit-id: def96923e54cdb82902539fd83bab6f86b9de69a
parents 8993d1fc 593c1b41
examples/main.c:
@@ -677,11 +677,16 @@ int main ( int argc , char *argv[] ) {
/* Read particles and space information from (GADGET) IC */
tic = getticks();
#ifdef WITH_MPI
#if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
read_ic_parallel( ICfileName , dim , &parts , &N , &periodic, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL );
#else
read_ic( ICfileName , dim , &parts , &N , &periodic );
read_ic_serial( ICfileName , dim , &parts , &N , &periodic, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL );
#endif
#else
read_ic_single( ICfileName , dim , &parts , &N , &periodic );
#endif
if ( myrank == 0 )
message( "reading particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout);
@@ -778,12 +783,17 @@ int main ( int argc , char *argv[] ) {
engine_redistribute ( &e );
#endif
message("Before write !");
/* Write the state of the system as it is before starting time integration. */
tic = getticks();
#ifdef WITH_MPI
#if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
write_output(&e, &us);
write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else
write_output_single(&e, &us);
#endif
message( "writing particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout);
@@ -837,11 +847,17 @@ int main ( int argc , char *argv[] ) {
if ( j % 100 == 0 )
{
#ifdef WITH_MPI
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else
write_output(&e, &us);
write_output_single(&e, &us);
#endif
}
/* Dump a line of aggregate output. */
@@ -858,20 +874,18 @@ int main ( int argc , char *argv[] ) {
/* for ( k = 0 ; k < 5 ; k++ )
printgParticle( s.gparts , pid[k] , N ); */
}
}
/* Print the values of the runner histogram. */
#ifdef HIST
printf( "main: runner histogram data:\n" );
for ( k = 0 ; k < runner_hist_N ; k++ )
printf( " %e %e %e\n" ,
runner_hist_a + k * (runner_hist_b - runner_hist_a) / runner_hist_N ,
runner_hist_a + (k + 1) * (runner_hist_b - runner_hist_a) / runner_hist_N ,
(double)runner_hist_bins[k] );
#endif
#ifdef HIST
printf( "main: runner histogram data:\n" );
for ( k = 0 ; k < runner_hist_N ; k++ )
printf( " %e %e %e\n" ,
runner_hist_a + k * (runner_hist_b - runner_hist_a) / runner_hist_N ,
runner_hist_a + (k + 1) * (runner_hist_b - runner_hist_a) / runner_hist_N ,
(double)runner_hist_bins[k] );
#endif
// write_output( &e );
/* Loop over the parts directly. */
// for ( k = 0 ; k < N ; k++ )
// printf( " %i %e %e\n" , s.parts[k].id , s.parts[k].count , s.parts[k].count_dh );
@@ -904,16 +918,20 @@ int main ( int argc , char *argv[] ) {
#endif */
/* Write final output. */
#ifdef WITH_MPI
write_output_parallel( &e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL );
#if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else
write_output( &e , &us );
write_output_single(&e, &us);
#endif
#ifdef WITH_MPI
if ( MPI_Finalize() != MPI_SUCCESS )
error( "call to MPI_Finalize failed with error %i." , res );
#endif
if ( MPI_Finalize() != MPI_SUCCESS )
error( "call to MPI_Finalize failed with error %i." , res );
#endif
/* Say goodbye. */
message( "done." );
@@ -921,4 +939,4 @@ int main ( int argc , char *argv[] ) {
/* All is calm, all is bright. */
return 0;
}
}
src/Makefile.am:
@@ -35,12 +35,12 @@ endif
# List required headers
include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \
engine.h swift.h serial_io.h timers.h debug.h scheduler.h proxy.h parallel_io.h \
common_io.h multipole.h
common_io.h single_io.h multipole.h
# Common source files
AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
serial_io.c timers.c debug.c scheduler.c proxy.c parallel_io.c \
units.c common_io.c multipole.c version.c
units.c common_io.c single_io.c multipole.c version.c
# Include files for distribution, not installation.
noinst_HEADERS = atomic.h cycle.h error.h inline.h kernel.h vector.h \
......
src/parallel_io.c:
@@ -21,7 +21,7 @@
/* Config parameters. */
#include "../config.h"
#if defined(HAVE_HDF5) && defined(WITH_MPI)
#if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
/* Tell hdf5 that we intend to use shared-memory parallel stuff. */
#define H5_HAVE_PARALLEL
@@ -90,24 +90,19 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
error( "Compulsory data set '%s' not present in the file." , name );
}
else
{
/* message("Optional data set '%s' not present. Zeroing this particle field...", name); */
{
for(i=0; i<N; ++i)
memset(part_c+i*partSize, 0, copySize);
return;
}
}
}
/* message( "Reading %s '%s' array...", importance == COMPULSORY ? "compulsory": "optional ", name); */
/* Open data space in file */
h_data = H5Dopen2(grp, name, H5P_DEFAULT);
if(h_data < 0)
{
error( "Error while opening data space '%s'." , name );
}
error( "Error while opening data space '%s'." , name );
/* Check data type */
h_type = H5Dget_type(h_data);
@@ -121,6 +116,7 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
if(temp == NULL)
error("Unable to allocate memory for temporary buffer");
/* Prepare information for hyperslab */
if(dim > 1)
{
rank = 2;
@@ -161,6 +157,9 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
/* Free and close everything */
free(temp);
H5Pclose(h_plist_id);
H5Sclose(h_filespace);
H5Sclose(h_memspace);
H5Tclose(h_type);
H5Dclose(h_data);
}
@@ -212,9 +211,9 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, plist_id);
hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(h_plist_id, comm, info);
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
if(h_file < 0)
{
error( "Error while opening file '%s'." , fileName );
@@ -285,10 +284,13 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
/* Close particle group */
H5Gclose(h_grp);
/* message("Done Reading particles..."); */
/* Close property handler */
H5Pclose(h_plist_id);
/* Close file */
H5Fclose(h_file);
/* message("Done Reading particles..."); */
}
@@ -318,7 +320,7 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
*
* Calls #error() if an error occurs.
*/
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, int N_total, int mpi_rank, int offset, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor)
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, long long N_total, int mpi_rank, long long offset, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor)
{
hid_t h_data=0, h_err=0, h_memspace=0, h_filespace=0, h_plist_id=0;
hsize_t shape[2], shape_total[2], offsets[2];
@@ -389,7 +391,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
h_data = H5Dcreate1(grp, name, hdf5Type(type), h_filespace, H5P_DEFAULT);
if(h_data < 0)
{
error( "Error while creating dataspace '%s'." , name );
error( "Error while creating dataset '%s'." , name );
}
H5Sclose(h_filespace);
@@ -423,6 +425,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
H5Dclose(h_data);
H5Pclose(h_plist_id);
H5Sclose(h_memspace);
H5Sclose(h_filespace);
}
/**
@@ -450,6 +453,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
* @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
*
* @param e The engine containing all the system.
* @param us The UnitSystem used for the conversion of units in the output
*
* Creates an HDF5 output file and writes the particles contained
* in the engine. If such a file already exists, it is erased and replaced
@@ -491,7 +495,6 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info);
h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
H5Pclose(plist_id);
if(h_file < 0)
{
error( "Error while opening file '%s'." , fileName );
@@ -580,6 +583,9 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra
/* message("Done writing particles..."); */
/* Close property descriptor */
H5Pclose(plist_id);
/* Close file */
H5Fclose(h_file);
......
src/parallel_io.h:
@@ -18,7 +18,7 @@
******************************************************************************/
#if defined(HAVE_HDF5) && defined(WITH_MPI)
#if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info);
......
src/serial_io.c: diff collapsed.
src/serial_io.h:
@@ -18,11 +18,11 @@
******************************************************************************/
#if defined(HAVE_HDF5) && !defined(WITH_MPI)
#if defined(HAVE_HDF5) && defined(WITH_MPI) && !defined(HAVE_PARALLEL_HDF5)
void read_ic ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic);
void read_ic_serial ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info);
void write_output ( struct engine* e, struct UnitSystem* us );
void write_output_serial ( struct engine* e, struct UnitSystem* us, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info );
#endif
src/single_io.c (new file):
/*******************************************************************************
* This file is part of SWIFT.
* Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
* Matthieu Schaller (matthieu.schaller@durham.ac.uk).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
******************************************************************************/
/* Config parameters. */
#include "../config.h"
#if defined(HAVE_HDF5) && !defined(WITH_MPI)
/* Some standard headers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <hdf5.h>
#include <math.h>
#include "const.h"
#include "cycle.h"
#include "lock.h"
#include "task.h"
#include "part.h"
#include "space.h"
#include "scheduler.h"
#include "engine.h"
#include "error.h"
#include "kernel.h"
#include "common_io.h"
/*-----------------------------------------------------------------------------
* Routines reading an IC file
*-----------------------------------------------------------------------------*/
/**
* @brief Reads a data array from a given HDF5 group.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the array.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part_c A (char*) pointer to the first occurrence of the field of interest in the parts array
* @param importance If COMPULSORY, the data must be present in the IC file. If OPTIONAL, the array will be zeroed when the data is not present.
*
* @todo A better version using HDF5 hyperslabs to read the file directly into the part array
* will be written once the structures have been stabilized.
*
* Calls #error() if an error occurs.
*/
void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim, char* part_c, enum DATA_IMPORTANCE importance)
{
hid_t h_data=0, h_err=0, h_type=0;
htri_t exist=0;
void* temp;
int i=0;
const size_t typeSize = sizeOfType(type);
const size_t copySize = typeSize * dim;
const size_t partSize = sizeof(struct part);
char* temp_c = 0;
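/* Note: part_c walks the particle array-of-structs in strides of partSize =
   sizeof(struct part); each particle contributes copySize = dim * typeSize
   bytes for the field being read. */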
/* Check whether the dataspace exists or not */
exist = H5Lexists(grp, name, 0);
if(exist < 0)
{
error( "Error while checking the existence of data set '%s'." , name );
}
else if(exist == 0)
{
if(importance == COMPULSORY)
{
error( "Compulsory data set '%s' not present in the file." , name );
}
else
{
/* message("Optional data set '%s' not present. Zeroing this particle field...", name); */
for(i=0; i<N; ++i)
memset(part_c+i*partSize, 0, copySize);
return;
}
}
/* message( "Reading %s '%s' array...", importance == COMPULSORY ? "compulsory": "optional ", name); */
/* Open data space */
h_data = H5Dopen1(grp, name);
if(h_data < 0)
{
error( "Error while opening data space '%s'." , name );
}
/* Check data type */
h_type = H5Dget_type(h_data);
if(h_type < 0)
error("Unable to retrieve data type from the file");
if(!H5Tequal(h_type, hdf5Type(type)))
error("Non-matching types between the code and the file");
/* Allocate temporary buffer */
temp = malloc(N * dim * sizeOfType(type));
if(temp == NULL)
error("Unable to allocate memory for temporary buffer");
/* Read HDF5 dataspace in temporary buffer */
/* Dirty version that happens to work for vectors but should be improved */
/* Using HDF5 dataspaces would be better */
h_err = H5Dread(h_data, hdf5Type(type), H5S_ALL, H5S_ALL, H5P_DEFAULT, temp);
if(h_err < 0)
{
error( "Error while reading data array '%s'." , name );
}
/* Copy temporary buffer to particle data */
temp_c = temp;
for(i=0; i<N; ++i)
memcpy(part_c+i*partSize, &temp_c[i*copySize], copySize);
/* Free and close everything */
free(temp);
H5Tclose(h_type);
H5Dclose(h_data);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the array.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, field, importance) readArrayBackEnd(grp, name, type, N, dim, (char*)(&(part[0]).field), importance)
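/* Example (cf. the calls in read_ic_single below):
   readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, x, COMPULSORY);
   expands to
   readArrayBackEnd(h_grp, "Coordinates", DOUBLE, *N, 3, (char*)(&((*parts)[0]).x), COMPULSORY);
   i.e. the field name is turned into a byte pointer to that field within the
   first particle, from which the per-particle stride is derived. */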
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type)
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles into
* the parts array. N is set to the number of particles found
* in the file.
*
* @warning Cannot read snapshots distributed over more than one file!
* @todo Read snapshots distributed over more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_single ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic)
{
hid_t h_file=0, h_grp=0;
double boxSize[3]={0.0,-1.0,-1.0}; /* GADGET has only cubic boxes (in cosmological mode) */
int numParticles[6]={0}; /* GADGET has 6 particle types. We only keep the type 0*/
/* Open file */
/* message("Opening file '%s' as IC.", fileName); */
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
if(h_file < 0)
{
error( "Error while opening file '%s'." , fileName );
}
/* Open header to read simulation properties */
/* message("Reading runtime parameters..."); */
h_grp = H5Gopen1(h_file, "/RuntimePars");
if(h_grp < 0)
error("Error while opening runtime parameters\n");
/* Read the relevant information */
readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);
/* Close runtime parameters */
H5Gclose(h_grp);
/* Open header to read simulation properties */
/* message("Reading file header..."); */
h_grp = H5Gopen1(h_file, "/Header");
if(h_grp < 0)
error("Error while opening file header\n");
/* Read the relevant information and print status */
readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
*N = numParticles[0];
dim[0] = boxSize[0];
dim[1] = ( boxSize[1] < 0 ) ? boxSize[0] : boxSize[1];
dim[2] = ( boxSize[2] < 0 ) ? boxSize[0] : boxSize[2];
/* message("Found %d particles in a %speriodic box of size [%f %f %f].", */
/* *N, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */
/* Close header */
H5Gclose(h_grp);
/* Allocate memory to store particles */
if(posix_memalign( (void*)parts , part_align , *N * sizeof(struct part)) != 0)
error("Error while allocating memory for particles");
bzero( *parts , *N * sizeof(struct part) );
/* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / (1024.*1024.)); */
/* Open SPH particles group */
/* message("Reading particle arrays..."); */
h_grp = H5Gopen1(h_file, "/PartType0");
if(h_grp < 0)
error( "Error while opening particle group.\n");
/* Read arrays */
readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, x, COMPULSORY);
readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, v, COMPULSORY);
readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, mass, COMPULSORY);
readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, h, COMPULSORY);
readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, u, COMPULSORY);
readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, id, COMPULSORY);
readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, dt, OPTIONAL);
readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, a, OPTIONAL);
readArray(h_grp, "Density", FLOAT, *N, 1, *parts, rho, OPTIONAL );
/* Close particle group */
H5Gclose(h_grp);
/* message("Done Reading particles..."); */
/* Close file */
H5Fclose(h_file);
}
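/* Example call (mirroring the non-MPI branch in main.c):
   double dim[3];
   struct part *parts = NULL;
   int N = 0, periodic = 0;
   read_ic_single( ICfileName , dim , &parts , &N , &periodic ); */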
/*-----------------------------------------------------------------------------
* Routines writing an output file
*-----------------------------------------------------------------------------*/
/**
* @brief Writes a data array in given HDF5 group.
*
* @param grp The group in which to write.
* @param fileName The name of the file in which the data is written
* @param xmfFile The FILE used to write the XMF description
* @param name The name of the array to write.
* @param type The #DATA_TYPE of the array.
* @param N The number of particles to write.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part_c A (char*) pointer to the first occurrence of the field of interest in the parts array
* @param us The UnitSystem currently in use
* @param convFactor The UnitConversionFactor for this array
*
* @todo A better version using HDF5 hyperslabs to write the file directly from the part array
* will be written once the structures have been stabilized.
*
* Calls #error() if an error occurs.
*/
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor)
{
hid_t h_data=0, h_err=0, h_space=0;
void* temp = 0;
int i=0, rank=0;
const size_t typeSize = sizeOfType(type);
const size_t copySize = typeSize * dim;
const size_t partSize = sizeof(struct part);
char* temp_c = 0;
hsize_t shape[2];
char buffer[150];
/* message("Writing '%s' array...", name); */
/* Allocate temporary buffer */
temp = malloc(N * dim * sizeOfType(type));
if(temp == NULL)
error("Unable to allocate memory for temporary buffer");
/* Copy particle data to temporary buffer */
temp_c = temp;
for(i=0; i<N; ++i)
memcpy(&temp_c[i*copySize], part_c+i*partSize, copySize);
/* Create data space */
h_space = H5Screate(H5S_SIMPLE);
if(h_space < 0)
{
error( "Error while creating data space for field '%s'." , name );
}
if(dim > 1)
{
rank = 2;
shape[0] = N; shape[1] = dim;
}
else
{
rank = 1;
shape[0] = N; shape[1] = 0;
}
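/* Vector quantities are thus written as an N x dim matrix (rank 2), while
   scalar quantities are written as a flat array of N entries (rank 1,
   shape[1] unused). */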
/* Change shape of data space */
h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
if(h_err < 0)
{
error( "Error while changing data space shape for field '%s'." , name );
}
/* Create dataset */
h_data = H5Dcreate1(grp, name, hdf5Type(type), h_space, H5P_DEFAULT);
if(h_data < 0)
{
error( "Error while creating dataspace '%s'." , name );