Skip to content
Snippets Groups Projects
Commit d8fbdc2d authored by Pedro Gonnet's avatar Pedro Gonnet
Browse files

Merge branch 'serial_io' of gitlab.cosma.dur.ac.uk:swift/swiftsim into mpi_fixes

Doing this to get the examples to work correctly.


Former-commit-id: def96923e54cdb82902539fd83bab6f86b9de69a
parents 8993d1fc 593c1b41
Branches
Tags
No related merge requests found
...@@ -677,11 +677,16 @@ int main ( int argc , char *argv[] ) { ...@@ -677,11 +677,16 @@ int main ( int argc , char *argv[] ) {
/* Read particles and space information from (GADGET) IC */ /* Read particles and space information from (GADGET) IC */
tic = getticks(); tic = getticks();
#ifdef WITH_MPI #if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
read_ic_parallel( ICfileName , dim , &parts , &N , &periodic, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL ); read_ic_parallel( ICfileName , dim , &parts , &N , &periodic, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL );
#else #else
read_ic( ICfileName , dim , &parts , &N , &periodic ); read_ic_serial( ICfileName , dim , &parts , &N , &periodic, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL );
#endif
#else
read_ic_single( ICfileName , dim , &parts , &N , &periodic );
#endif #endif
if ( myrank == 0 ) if ( myrank == 0 )
message( "reading particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout); message( "reading particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout);
...@@ -778,12 +783,17 @@ int main ( int argc , char *argv[] ) { ...@@ -778,12 +783,17 @@ int main ( int argc , char *argv[] ) {
engine_redistribute ( &e ); engine_redistribute ( &e );
#endif #endif
message("Before write !");
/* Write the state of the system as it is before starting time integration. */ /* Write the state of the system as it is before starting time integration. */
tic = getticks(); tic = getticks();
#ifdef WITH_MPI #if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL); write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else #else
write_output(&e, &us); write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else
write_output_single(&e, &us);
#endif #endif
message( "writing particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout); message( "writing particle properties took %.3f ms." , ((double)(getticks() - tic)) / CPU_TPS * 1000 ); fflush(stdout);
...@@ -837,11 +847,17 @@ int main ( int argc , char *argv[] ) { ...@@ -837,11 +847,17 @@ int main ( int argc , char *argv[] ) {
if ( j % 100 == 0 ) if ( j % 100 == 0 )
{ {
#ifdef WITH_MPI
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL); #if defined( WITH_MPI )
#if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else #else
write_output(&e, &us); write_output_single(&e, &us);
#endif #endif
} }
/* Dump a line of agregate output. */ /* Dump a line of agregate output. */
...@@ -858,20 +874,18 @@ int main ( int argc , char *argv[] ) { ...@@ -858,20 +874,18 @@ int main ( int argc , char *argv[] ) {
/* for ( k = 0 ; k < 5 ; k++ ) /* for ( k = 0 ; k < 5 ; k++ )
printgParticle( s.gparts , pid[k] , N ); */ printgParticle( s.gparts , pid[k] , N ); */
} }
/* Print the values of the runner histogram. */ /* Print the values of the runner histogram. */
#ifdef HIST #ifdef HIST
printf( "main: runner histogram data:\n" ); printf( "main: runner histogram data:\n" );
for ( k = 0 ; k < runner_hist_N ; k++ ) for ( k = 0 ; k < runner_hist_N ; k++ )
printf( " %e %e %e\n" , printf( " %e %e %e\n" ,
runner_hist_a + k * (runner_hist_b - runner_hist_a) / runner_hist_N , runner_hist_a + k * (runner_hist_b - runner_hist_a) / runner_hist_N ,
runner_hist_a + (k + 1) * (runner_hist_b - runner_hist_a) / runner_hist_N , runner_hist_a + (k + 1) * (runner_hist_b - runner_hist_a) / runner_hist_N ,
(double)runner_hist_bins[k] ); (double)runner_hist_bins[k] );
#endif #endif
// write_output( &e );
/* Loop over the parts directly. */ /* Loop over the parts directly. */
// for ( k = 0 ; k < N ; k++ ) // for ( k = 0 ; k < N ; k++ )
// printf( " %i %e %e\n" , s.parts[k].id , s.parts[k].count , s.parts[k].count_dh ); // printf( " %i %e %e\n" , s.parts[k].id , s.parts[k].count , s.parts[k].count_dh );
...@@ -904,16 +918,20 @@ int main ( int argc , char *argv[] ) { ...@@ -904,16 +918,20 @@ int main ( int argc , char *argv[] ) {
#endif */ #endif */
/* Write final output. */ /* Write final output. */
#ifdef WITH_MPI #if defined( WITH_MPI )
write_output_parallel( &e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL ); #if defined( HAVE_PARALLEL_HDF5 )
write_output_parallel(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#else
write_output_serial(&e, &us, myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL);
#endif
#else #else
write_output( &e , &us ); write_output_single(&e, &us);
#endif #endif
#ifdef WITH_MPI #ifdef WITH_MPI
if ( MPI_Finalize() != MPI_SUCCESS ) if ( MPI_Finalize() != MPI_SUCCESS )
error( "call to MPI_Finalize failed with error %i." , res ); error( "call to MPI_Finalize failed with error %i." , res );
#endif #endif
/* Say goodbye. */ /* Say goodbye. */
message( "done." ); message( "done." );
...@@ -921,4 +939,4 @@ int main ( int argc , char *argv[] ) { ...@@ -921,4 +939,4 @@ int main ( int argc , char *argv[] ) {
/* All is calm, all is bright. */ /* All is calm, all is bright. */
return 0; return 0;
} }
...@@ -35,12 +35,12 @@ endif ...@@ -35,12 +35,12 @@ endif
# List required headers # List required headers
include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \ include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \
engine.h swift.h serial_io.h timers.h debug.h scheduler.h proxy.h parallel_io.h \ engine.h swift.h serial_io.h timers.h debug.h scheduler.h proxy.h parallel_io.h \
common_io.h multipole.h common_io.h single_io.h multipole.h
# Common source files # Common source files
AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
serial_io.c timers.c debug.c scheduler.c proxy.c parallel_io.c \ serial_io.c timers.c debug.c scheduler.c proxy.c parallel_io.c \
units.c common_io.c multipole.c version.c units.c common_io.c single_io.c multipole.c version.c
# Include files for distribution, not installation. # Include files for distribution, not installation.
noinst_HEADERS = atomic.h cycle.h error.h inline.h kernel.h vector.h \ noinst_HEADERS = atomic.h cycle.h error.h inline.h kernel.h vector.h \
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
/* Config parameters. */ /* Config parameters. */
#include "../config.h" #include "../config.h"
#if defined(HAVE_HDF5) && defined(WITH_MPI) #if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
/* Tell hdf5 that we intend to use shared-memory parallel stuff. */ /* Tell hdf5 that we intend to use shared-memory parallel stuff. */
#define H5_HAVE_PARALLEL #define H5_HAVE_PARALLEL
...@@ -90,24 +90,19 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim ...@@ -90,24 +90,19 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
error( "Compulsory data set '%s' not present in the file." , name ); error( "Compulsory data set '%s' not present in the file." , name );
} }
else else
{ {
/* message("Optional data set '%s' not present. Zeroing this particle field...", name); */
for(i=0; i<N; ++i) for(i=0; i<N; ++i)
memset(part_c+i*partSize, 0, copySize); memset(part_c+i*partSize, 0, copySize);
return; return;
} }
} }
/* message( "Reading %s '%s' array...", importance == COMPULSORY ? "compulsory": "optional ", name); */ /* message( "Reading %s '%s' array...", importance == COMPULSORY ? "compulsory": "optional ", name); */
/* Open data space in file */ /* Open data space in file */
h_data = H5Dopen2(grp, name, H5P_DEFAULT); h_data = H5Dopen2(grp, name, H5P_DEFAULT);
if(h_data < 0) if(h_data < 0)
{ error( "Error while opening data space '%s'." , name );
error( "Error while opening data space '%s'." , name );
}
/* Check data type */ /* Check data type */
h_type = H5Dget_type(h_data); h_type = H5Dget_type(h_data);
...@@ -121,6 +116,7 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim ...@@ -121,6 +116,7 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
if(temp == NULL) if(temp == NULL)
error("Unable to allocate memory for temporary buffer"); error("Unable to allocate memory for temporary buffer");
/* Prepare information for hyperslab */
if(dim > 1) if(dim > 1)
{ {
rank = 2; rank = 2;
...@@ -161,6 +157,9 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim ...@@ -161,6 +157,9 @@ void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim
/* Free and close everything */ /* Free and close everything */
free(temp); free(temp);
H5Pclose(h_plist_id);
H5Sclose(h_filespace);
H5Sclose(h_memspace);
H5Tclose(h_type); H5Tclose(h_type);
H5Dclose(h_data); H5Dclose(h_data);
} }
...@@ -212,9 +211,9 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int ...@@ -212,9 +211,9 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
/* Open file */ /* Open file */
/* message("Opening file '%s' as IC.", fileName); */ /* message("Opening file '%s' as IC.", fileName); */
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info); H5Pset_fapl_mpio(h_plist_id, comm, info);
h_file = H5Fopen(fileName, H5F_ACC_RDONLY, plist_id); h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
if(h_file < 0) if(h_file < 0)
{ {
error( "Error while opening file '%s'." , fileName ); error( "Error while opening file '%s'." , fileName );
...@@ -285,10 +284,13 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int ...@@ -285,10 +284,13 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
/* Close particle group */ /* Close particle group */
H5Gclose(h_grp); H5Gclose(h_grp);
/* message("Done Reading particles..."); */ /* Close property handler */
H5Pclose(h_plist_id);
/* Close file */ /* Close file */
H5Fclose(h_file); H5Fclose(h_file);
/* message("Done Reading particles..."); */
} }
...@@ -318,7 +320,7 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int ...@@ -318,7 +320,7 @@ void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int
* *
* Calls #error() if an error occurs. * Calls #error() if an error occurs.
*/ */
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, int N_total, int mpi_rank, int offset, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor) void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, long long N_total, int mpi_rank, long long offset, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor)
{ {
hid_t h_data=0, h_err=0, h_memspace=0, h_filespace=0, h_plist_id=0; hid_t h_data=0, h_err=0, h_memspace=0, h_filespace=0, h_plist_id=0;
hsize_t shape[2], shape_total[2], offsets[2]; hsize_t shape[2], shape_total[2], offsets[2];
...@@ -389,7 +391,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu ...@@ -389,7 +391,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
h_data = H5Dcreate1(grp, name, hdf5Type(type), h_filespace, H5P_DEFAULT); h_data = H5Dcreate1(grp, name, hdf5Type(type), h_filespace, H5P_DEFAULT);
if(h_data < 0) if(h_data < 0)
{ {
error( "Error while creating dataspace '%s'." , name ); error( "Error while creating dataset '%s'." , name );
} }
H5Sclose(h_filespace); H5Sclose(h_filespace);
...@@ -423,6 +425,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu ...@@ -423,6 +425,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
H5Dclose(h_data); H5Dclose(h_data);
H5Pclose(h_plist_id); H5Pclose(h_plist_id);
H5Sclose(h_memspace); H5Sclose(h_memspace);
H5Sclose(h_filespace);
} }
/** /**
...@@ -450,6 +453,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu ...@@ -450,6 +453,7 @@ void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enu
* @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
* *
* @param e The engine containing all the system. * @param e The engine containing all the system.
* @param us The UnitSystem used for the conversion of units in the output
* *
* Creates an HDF5 output file and writes the particles contained * Creates an HDF5 output file and writes the particles contained
* in the engine. If such a file already exists, it is erased and replaced * in the engine. If such a file already exists, it is erased and replaced
...@@ -491,7 +495,6 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra ...@@ -491,7 +495,6 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra
hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS); hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(plist_id, comm, info); H5Pset_fapl_mpio(plist_id, comm, info);
h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
H5Pclose(plist_id);
if(h_file < 0) if(h_file < 0)
{ {
error( "Error while opening file '%s'." , fileName ); error( "Error while opening file '%s'." , fileName );
...@@ -580,6 +583,9 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra ...@@ -580,6 +583,9 @@ void write_output_parallel (struct engine *e, struct UnitSystem* us, int mpi_ra
/* message("Done writing particles..."); */ /* message("Done writing particles..."); */
/* Close property descriptor */
H5Pclose(plist_id);
/* Close file */ /* Close file */
H5Fclose(h_file); H5Fclose(h_file);
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
******************************************************************************/ ******************************************************************************/
#if defined(HAVE_HDF5) && defined(WITH_MPI) #if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)
void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info); void read_ic_parallel ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info);
......
This diff is collapsed.
...@@ -18,11 +18,11 @@ ...@@ -18,11 +18,11 @@
******************************************************************************/ ******************************************************************************/
#if defined(HAVE_HDF5) && !defined(WITH_MPI) #if defined(HAVE_HDF5) && defined(WITH_MPI) && !defined(HAVE_PARALLEL_HDF5)
void read_ic ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic); void read_ic_serial ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info);
void write_output ( struct engine* e, struct UnitSystem* us ); void write_output_serial ( struct engine* e, struct UnitSystem* us, int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info );
#endif #endif
/*******************************************************************************
* This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
* Matthieu Schaller (matthieu.schaller@durham.ac.uk).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
******************************************************************************/
/* Config parameters. */
#include "../config.h"
#if defined(HAVE_HDF5) && !defined(WITH_MPI)
/* Some standard headers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <hdf5.h>
#include <math.h>
#include "const.h"
#include "cycle.h"
#include "lock.h"
#include "task.h"
#include "part.h"
#include "space.h"
#include "scheduler.h"
#include "engine.h"
#include "error.h"
#include "kernel.h"
#include "common_io.h"
/*-----------------------------------------------------------------------------
* Routines reading an IC file
*-----------------------------------------------------------------------------*/
/**
* @brief Reads a data array from a given HDF5 group.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part_c A (char*) pointer on the first occurrence of the field of interest in the parts array
* @param importance If COMPULSORY, the data must be present in the IC file. If OPTIONAL, the array will be zeroed when the data is not present.
*
* @todo A better version using HDF5 hyperslabs to read the file directly into the part array
 * will be written once the structures have been stabilized.
*
* Calls #error() if an error occurs.
*/
void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N, int dim, char* part_c, enum DATA_IMPORTANCE importance)
{
  hid_t h_data = 0, h_err = 0, h_type = 0;
  htri_t exist = 0;
  void* buf = 0;
  char* buf_c = 0;
  int i = 0;
  const size_t typeSize = sizeOfType(type);
  const size_t copySize = typeSize * dim;        /* bytes of this field per particle */
  const size_t partSize = sizeof(struct part);   /* stride between particles */

  /* Is the requested data set present in this group at all? */
  exist = H5Lexists(grp, name, 0);
  if (exist < 0)
    {
      error( "Error while checking the existence of data set '%s'." , name );
    }
  else if (exist == 0)
    {
      if (importance == COMPULSORY)
        {
          error( "Compulsory data set '%s' not present in the file." , name );
        }
      else
        {
          /* Optional field missing from the IC file: zero it for every particle. */
          for (i = 0; i < N; ++i)
            memset(part_c + i * partSize, 0, copySize);
          return;
        }
    }

  /* Open the data set itself. */
  h_data = H5Dopen1(grp, name);
  if (h_data < 0)
    {
      error( "Error while opening data space '%s'." , name );
    }

  /* The on-disk type must match the in-memory type exactly. */
  h_type = H5Dget_type(h_data);
  if (h_type < 0)
    error("Unable to retrieve data type from the file");
  if (!H5Tequal(h_type, hdf5Type(type)))
    error("Non-matching types between the code and the file");

  /* Stage the whole array into a contiguous scratch buffer. */
  buf = malloc(N * dim * typeSize);
  if (buf == NULL)
    error("Unable to allocate memory for temporary buffer");

  /* Read the full data set in one go (vectors handled by the flat layout). */
  h_err = H5Dread(h_data, hdf5Type(type), H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
  if (h_err < 0)
    {
      error( "Error while reading data array '%s'." , name );
    }

  /* Scatter the staged values into the (strided) particle array. */
  buf_c = buf;
  for (i = 0; i < N; ++i)
    memcpy(part_c + i * partSize, &buf_c[i * copySize], copySize);

  /* Tidy up. */
  free(buf);
  H5Tclose(h_type);
  H5Dclose(h_data);
}
/**
* @brief A helper macro to call the readArrayBackEnd function more easily.
*
* @param grp The group from which to read.
* @param name The name of the array to read.
* @param type The #DATA_TYPE of the attribute.
* @param N The number of particles.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
* @param part The array of particles to fill
* @param field The name of the field (C code name as defined in part.h) to fill
* @param importance Is the data compulsory or not
*
*/
#define readArray(grp, name, type, N, dim, part, field, importance) readArrayBackEnd(grp, name, type, N, dim, (char*)(&(part[0]).field), importance)
/**
* @brief Reads an HDF5 initial condition file (GADGET-3 type)
*
* @param fileName The file to read.
* @param dim (output) The dimension of the volume read from the file.
* @param parts (output) The array of #part read from the file.
* @param N (output) The number of particles read from the file.
* @param periodic (output) 1 if the volume is periodic, 0 if not.
*
* Opens the HDF5 file fileName and reads the particles contained
* in the parts array. N is the returned number of particles found
* in the file.
*
 * @warning Cannot read a snapshot distributed over more than 1 file !!!
 * @todo Read snapshots distributed in more than one file.
*
* Calls #error() if an error occurs.
*
*/
void read_ic_single ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic)
{
  hid_t h_file=0, h_grp=0;
  double boxSize[3]={0.0,-1.0,-1.0};         /* GADGET has only cubic boxes (in cosmological mode) */
  int numParticles[6]={0};   /* GADGET has 6 particle types. We only keep the type 0*/

  /* Open file (read-only, default access properties: no MPI here). */
  /* message("Opening file '%s' as IC.", fileName); */
  h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
  if(h_file < 0)
    {
      error( "Error while opening file '%s'." , fileName );
    }

  /* Open header to read simulation properties */
  /* message("Reading runtime parameters..."); */
  h_grp = H5Gopen1(h_file, "/RuntimePars");
  if(h_grp < 0)
    error("Error while opening runtime parameters\n");

  /* Read the relevant information */
  readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);

  /* Close runtime parameters */
  H5Gclose(h_grp);

  /* Open header to read simulation properties */
  /* message("Reading file header..."); */
  h_grp = H5Gopen1(h_file, "/Header");
  if(h_grp < 0)
    error("Error while opening file header\n");

  /* Read the relevant information and print status */
  readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
  readAttribute(h_grp, "NumPart_Total", UINT, numParticles);

  /* Only particle type 0 (gas) is kept; the other 5 GADGET types are ignored. */
  *N = numParticles[0];
  dim[0] = boxSize[0];
  /* A negative sentinel (see boxSize init) means the box is cubic. */
  dim[1] = ( boxSize[1] < 0 ) ? boxSize[0] : boxSize[1];
  dim[2] = ( boxSize[2] < 0 ) ? boxSize[0] : boxSize[2];

  /* message("Found %d particles in a %speriodic box of size [%f %f %f].", */
  /* 	 *N, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */

  /* Close header */
  H5Gclose(h_grp);

  /* Allocate aligned, zeroed memory for the particles (alignment required by
   * the SIMD kernels; see part_align). Ownership passes to the caller. */
  if(posix_memalign( (void*)parts , part_align , *N * sizeof(struct part)) != 0)
    error("Error while allocating memory for particles");
  bzero( *parts , *N * sizeof(struct part) );

  /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / (1024.*1024.)); */

  /* Open SPH particles group */
  /* message("Reading particle arrays..."); */
  h_grp = H5Gopen1(h_file, "/PartType0");
  if(h_grp < 0)
    error( "Error while opening particle group.\n");

  /* Read arrays: COMPULSORY fields must be in the file; OPTIONAL ones are
   * zeroed by readArray when absent. */
  readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, x, COMPULSORY);
  readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, v, COMPULSORY);
  readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, mass, COMPULSORY);
  readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, h, COMPULSORY);
  readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, u, COMPULSORY);
  readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, id, COMPULSORY);
  readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, dt, OPTIONAL);
  readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, a, OPTIONAL);
  readArray(h_grp, "Density", FLOAT, *N, 1, *parts, rho, OPTIONAL );

  /* Close particle group */
  H5Gclose(h_grp);

  /* message("Done Reading particles..."); */

  /* Close file */
  H5Fclose(h_file);
}
/*-----------------------------------------------------------------------------
* Routines writing an output file
*-----------------------------------------------------------------------------*/
/**
* @brief Writes a data array in given HDF5 group.
*
* @param grp The group in which to write.
* @param fileName The name of the file in which the data is written
* @param xmfFile The FILE used to write the XMF description
* @param name The name of the array to write.
* @param type The #DATA_TYPE of the array.
* @param N The number of particles to write.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part_c A (char*) pointer on the first occurrence of the field of interest in the parts array
* @param us The UnitSystem currently in use
* @param convFactor The UnitConversionFactor for this array
*
* @todo A better version using HDF5 hyperslabs to write the file directly from the part array
 * will be written once the structures have been stabilized.
*
* Calls #error() if an error occurs.
*/
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name, enum DATA_TYPE type, int N, int dim, char* part_c, struct UnitSystem* us, enum UnitConversionFactor convFactor)
{
  hid_t h_data=0, h_err=0, h_space=0;
  void* temp = 0;
  int i=0, rank=0;
  const size_t typeSize = sizeOfType(type);
  const size_t copySize = typeSize * dim;        /* bytes of this field per particle */
  const size_t partSize = sizeof(struct part);   /* stride between particles */
  char* temp_c = 0;
  hsize_t shape[2];
  char buffer[150];

  /* message("Writing '%s' array...", name); */

  /* Allocate temporary buffer */
  temp = malloc(N * dim * typeSize);
  if(temp == NULL)
    error("Unable to allocate memory for temporary buffer");

  /* Gather the (strided) particle field into the contiguous buffer */
  temp_c = temp;
  for(i=0; i<N; ++i)
    memcpy(&temp_c[i*copySize], part_c+i*partSize, copySize);

  /* Create data space */
  h_space = H5Screate(H5S_SIMPLE);
  if(h_space < 0)
    {
      error( "Error while creating data space for field '%s'." , name );
    }

  /* Vectors are written as an N x dim matrix, scalars as a 1-D array of N */
  if(dim > 1)
    {
      rank = 2;
      shape[0] = N; shape[1] = dim;
    }
  else
    {
      rank = 1;
      shape[0] = N; shape[1] = 0;
    }

  /* Change shape of data space */
  h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
  if(h_err < 0)
    {
      error( "Error while changing data space shape for field '%s'." , name );
    }

  /* Create dataset */
  h_data = H5Dcreate1(grp, name, hdf5Type(type), h_space, H5P_DEFAULT);
  if(h_data < 0)
    {
      /* "dataset", not "dataspace": keeps wording consistent with parallel_io.c */
      error( "Error while creating dataset '%s'." , name );
    }

  /* Write temporary buffer to HDF5 dataspace */
  h_err = H5Dwrite(h_data, hdf5Type(type), h_space, H5S_ALL, H5P_DEFAULT, temp);
  if(h_err < 0)
    {
      error( "Error while writing data array '%s'." , name );
    }

  /* Write XMF description for this data set */
  writeXMFline(xmfFile, fileName, name, N, dim, type);

  /* Write unit conversion factors for this data set.
   * NOTE(review): the "exponant" spelling below is part of the on-disk
   * attribute names; changing it would break existing readers. */
  conversionString( buffer, us, convFactor );
  writeAttribute_d( h_data, "CGS conversion factor", conversionFactor( us, convFactor ) );
  writeAttribute_f( h_data, "h-scale exponant", hFactor( us, convFactor ) );
  writeAttribute_f( h_data, "a-scale exponant", aFactor( us, convFactor ) );
  writeAttribute_s( h_data, "Conversion factor", buffer );

  /* Free and close everything */
  free(temp);
  H5Dclose(h_data);
  H5Sclose(h_space);
}
/**
 * @brief A helper macro to call the writeArrayBackEnd function more easily.
*
* @param grp The group in which to write.
* @param fileName The name of the file in which the data is written
* @param xmfFile The FILE used to write the XMF description
* @param name The name of the array to write.
* @param type The #DATA_TYPE of the array.
* @param N The number of particles to write.
* @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part A (char*) pointer on the first occurrence of the field of interest in the parts array
* @param field The name (code name) of the field to read from.
* @param us The UnitSystem currently in use
* @param convFactor The UnitConversionFactor for this array
*
*/
#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, field, us, convFactor) writeArrayBackEnd(grp, fileName, xmfFile, name, type, N, dim, (char*)(&(part[0]).field), us, convFactor)
/**
* @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
*
* @param e The engine containing all the system.
* @param us The UnitSystem used for the conversion of units in the output
*
* Creates an HDF5 output file and writes the particles contained
* in the engine. If such a file already exists, it is erased and replaced
* by the new one.
* The companion XMF file is also updated accordingly.
*
* Calls #error() if an error occurs.
*
*/
void write_output_single (struct engine *e, struct UnitSystem* us)
{
  hid_t h_file=0, h_grp=0;
  int N = e->s->nr_parts;
  int periodic = e->s->periodic;
  int numParticles[6]={N,0};            /* GADGET has 6 particle types; only type 0 used */
  int numParticlesHighWord[6]={0};      /* high 32 bits of the 64-bit particle counts */
  int numFiles = 1;                     /* single-node version: one file per snapshot */
  struct part* parts = e->s->parts;
  FILE* xmfFile = 0;
  /* Snapshot counter, persists across calls so successive outputs get
   * output_000.hdf5, output_001.hdf5, ... */
  static int outputCount = 0;

  /* File name */
  char fileName[200];
  sprintf(fileName, "output_%03i.hdf5", outputCount);

  /* First time, we need to create the XMF file */
  if(outputCount == 0)
    createXMFfile();

  /* Prepare the XMF file for the new entry */
  xmfFile = prepareXMFfile();

  /* Write the part corresponding to this specific output */
  writeXMFheader(xmfFile, N, fileName, e->time);

  /* Open file (truncates any existing snapshot of the same name) */
  /* message("Opening file '%s'.", fileName); */
  h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT,H5P_DEFAULT);
  if(h_file < 0)
    {
      error( "Error while opening file '%s'." , fileName );
    }

  /* Open header to write simulation properties */
  /* message("Writing runtime parameters..."); */
  h_grp = H5Gcreate1(h_file, "/RuntimePars", 0);
  if(h_grp < 0)
    error("Error while creating runtime parameters group\n");

  /* Write the relevant information */
  writeAttribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);

  /* Close runtime parameters */
  H5Gclose(h_grp);

  /* Open header to write simulation properties */
  /* message("Writing file header..."); */
  h_grp = H5Gcreate1(h_file, "/Header", 0);
  if(h_grp < 0)
    error("Error while creating file header\n");

  /* Print the relevant information and print status */
  writeAttribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
  writeAttribute(h_grp, "NumPart_ThisFile", UINT, numParticles, 6);
  writeAttribute(h_grp, "Time", DOUBLE, &e->time, 1);

  /* GADGET-2 legacy values, kept so GADGET tools can read the snapshot */
  writeAttribute(h_grp, "NumPart_Total", UINT, numParticles, 6);
  writeAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticlesHighWord, 6);
  double MassTable[6] = {0., 0., 0., 0., 0., 0.};
  writeAttribute(h_grp, "MassTable", DOUBLE, MassTable, 6);
  writeAttribute(h_grp, "Flag_Entropy_ICs", UINT, numParticlesHighWord, 6);
  writeAttribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);

  /* Close header */
  H5Gclose(h_grp);

  /* Print the SPH parameters */
  writeSPHflavour(h_file);

  /* Print the system of Units */
  writeUnitSystem(h_file, us);

  /* Create SPH particles group */
  /* message("Writing particle arrays..."); */
  h_grp = H5Gcreate1(h_file, "/PartType0", 0);
  if(h_grp < 0)
    error( "Error while creating particle group.\n");

  /* Write arrays (each call also emits the XMF line and unit attributes) */
  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts, x, us, UNIT_CONV_LENGTH);
  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts, v, us, UNIT_CONV_SPEED);
  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, mass, us, UNIT_CONV_MASS);
  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts, h, us, UNIT_CONV_LENGTH);
  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts, id, us, UNIT_CONV_NO_UNITS);
  writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, dt, us, UNIT_CONV_TIME);
  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts, a, us, UNIT_CONV_ACCELERATION);
  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, rho, us, UNIT_CONV_DENSITY);

  /* Close particle group */
  H5Gclose(h_grp);

  /* Write LXMF file descriptor */
  writeXMFfooter(xmfFile);

  /* message("Done writing particles..."); */

  /* Close file */
  H5Fclose(h_file);

  ++outputCount;
}
#endif /* HAVE_HDF5 */
/*******************************************************************************
* This file is part of SWIFT.
 * Copyright (c) 2012 Matthieu Schaller (matthieu.schaller@durham.ac.uk).
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
******************************************************************************/
#if defined(HAVE_HDF5) && !defined(WITH_MPI)
void read_ic_single ( char* fileName, double dim[3], struct part **parts, int* N, int* periodic);
void write_output_single ( struct engine* e, struct UnitSystem* us );
#endif
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "runner.h" #include "runner.h"
#include "engine.h" #include "engine.h"
#include "units.h" #include "units.h"
#include "single_io.h"
#include "serial_io.h" #include "serial_io.h"
#include "parallel_io.h" #include "parallel_io.h"
#include "debug.h" #include "debug.h"
......
...@@ -47,7 +47,7 @@ void initUnitSystem(struct UnitSystem* us) ...@@ -47,7 +47,7 @@ void initUnitSystem(struct UnitSystem* us)
{ {
us->UnitMass_in_cgs = const_unit_mass_in_cgs; us->UnitMass_in_cgs = const_unit_mass_in_cgs;
us->UnitLength_in_cgs = const_unit_length_in_cgs; us->UnitLength_in_cgs = const_unit_length_in_cgs;
us->UnitTime_in_cgs = 1.d / (const_unit_velocity_in_cgs / const_unit_length_in_cgs ); us->UnitTime_in_cgs = 1. / ((double) const_unit_velocity_in_cgs / ( (double)const_unit_length_in_cgs ));
us->UnitCurrent_in_cgs = 1.; us->UnitCurrent_in_cgs = 1.;
us->UnitTemperature_in_cgs = 1.; us->UnitTemperature_in_cgs = 1.;
} }
...@@ -257,8 +257,7 @@ double generalConversionFactor(struct UnitSystem* us, float baseUnitsExponants[5 ...@@ -257,8 +257,7 @@ double generalConversionFactor(struct UnitSystem* us, float baseUnitsExponants[5
for(i = 0 ; i < 5 ; ++i ) for(i = 0 ; i < 5 ; ++i )
if(baseUnitsExponants[i] != 0) if(baseUnitsExponants[i] != 0)
factor *= pow( getBaseUnit( us, i ) , baseUnitsExponants[i] ); factor *= pow( getBaseUnit( us, i ) , baseUnitsExponants[i] );
return factor; return factor;
} }
......
...@@ -43,11 +43,11 @@ struct UnitSystem ...@@ -43,11 +43,11 @@ struct UnitSystem
*/ */
enum BaseUnits enum BaseUnits
{ {
UNIT_MASS, UNIT_MASS = 0,
UNIT_LENGTH, UNIT_LENGTH = 1,
UNIT_TIME, UNIT_TIME = 2,
UNIT_CURRENT, UNIT_CURRENT = 3,
UNIT_TEMPERATURE UNIT_TEMPERATURE = 4
}; };
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment