/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk).
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

#if defined(HAVE_HDF5) && defined(WITH_MPI) && defined(HAVE_PARALLEL_HDF5)

/* Tell hdf5 that we intend to use shared-memory parallel stuff. */
#define H5_HAVE_PARALLEL

/* Some standard headers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <hdf5.h>
#include <math.h>
#include <mpi.h>

#include "const.h"
#include "cycle.h"
#include "lock.h"
#include "task.h"
#include "space.h"
#include "scheduler.h"
#include "engine.h"
#include "error.h"
#include "kernel.h"
#include "common_io.h"

/**
 * @brief Reads a data array from a given HDF5 group.
 *
 * @param grp The group from which to read.
 * @param name The name of the array to read.
 * @param type The #DATA_TYPE of the attribute.
 * @param N The number of particles.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param N_total Total number of particles
 * @param offset Offset in the array where this task starts reading
 * @param part_c A (char*) pointer on the first occurrence of the field of
 * interest in the parts array
 * @param importance If COMPULSORY, the data must be present in the IC file. If
 * OPTIONAL, the array will be zeroed when the data is not present.
 *
 * @todo A better version using HDF5 hyperslabs to read the file directly into
 * the part array will be written once the structures have been stabilized.
 *
 * Calls #error() if an error occurs.
 */
void readArrayBackEnd(hid_t grp, char* name, enum DATA_TYPE type, int N,
                      int dim, long long N_total, long long offset,
                      char* part_c, enum DATA_IMPORTANCE importance) {
  hid_t h_data = 0, h_err = 0, h_type = 0, h_memspace = 0, h_filespace = 0,
        h_plist_id = 0;
  hsize_t shape[2], offsets[2];
  htri_t exist = 0;
  void* temp;
  int i = 0, rank = 0;
  const size_t typeSize = sizeOfType(type);
  const size_t copySize = typeSize * dim;
  const size_t partSize = sizeof(struct part);
  char* temp_c = 0;

  /* Check whether the dataspace exists or not */
  exist = H5Lexists(grp, name, 0);
  if (exist < 0) {
    error("Error while checking the existence of data set '%s'.", name);
  } else if (exist == 0) {
    if (importance == COMPULSORY) {
      error("Compulsory data set '%s' not present in the file.", name);
    } else {
      for (i = 0; i < N; ++i) memset(part_c + i * partSize, 0, copySize);
      return;
    }
  }

  /* message( "Reading %s '%s' array...", importance == COMPULSORY ?
   * "compulsory": "optional  ", name); */

  /* Open data space in file */
  h_data = H5Dopen2(grp, name, H5P_DEFAULT);
  if (h_data < 0) error("Error while opening data space '%s'.", name);

  /* Check data type */
  h_type = H5Dget_type(h_data);
  if (h_type < 0) error("Unable to retrieve data type from the file");
  if (!H5Tequal(h_type, hdf5Type(type)))
    error("Non-matching types between the code and the file");

  /* Allocate temporary buffer */
  temp = malloc(N * dim * sizeOfType(type));
  if (temp == NULL) error("Unable to allocate memory for temporary buffer");

  /* Prepare information for hyperslab */
  if (dim > 1) {
    rank = 2;
    shape[0] = N;
    shape[1] = dim;
    offsets[0] = offset;
    offsets[1] = 0;
  } else {
    rank = 1;
    shape[0] = N;
    shape[1] = 0;
    offsets[0] = offset;
    offsets[1] = 0;
  }
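
  /* For example, a 3-vector field on a rank holding N particles that start at
   * global index 'offset' uses an N x dim memory dataspace, while the file
   * dataset spans all N_total particles: the hyperslab selected below covers
   * rows [offset, offset + N) and all dim columns of the file dataset. */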

  /* Create data space in memory */
  h_memspace = H5Screate_simple(rank, shape, NULL);

  /* Select hyperslab in file */
  h_filespace = H5Dget_space(h_data);
  H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);

  /* Set collective reading properties */
  h_plist_id = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);
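  /* With H5FD_MPIO_COLLECTIVE, every rank takes part in a single collective
   * read of the dataset (each filling its own hyperslab selection) rather
   * than issuing independent I/O requests. */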

  /* Read HDF5 dataspace in temporary buffer */
  /* Dirty version that happens to work for vectors but should be improved */
  /* Using HDF5 dataspaces would be better */
  h_err = H5Dread(h_data, hdf5Type(type), h_memspace, h_filespace, h_plist_id,
                  temp);
  if (h_err < 0) {
    error("Error while reading data array '%s'.", name);
  }

  /* Copy temporary buffer to particle data */
  temp_c = temp;
  for (i = 0; i < N; ++i)
    memcpy(part_c + i * partSize, &temp_c[i * copySize], copySize);

  /* Free and close everything */
  free(temp);
  H5Pclose(h_plist_id);
  H5Sclose(h_filespace);
  H5Sclose(h_memspace);
  H5Tclose(h_type);
  H5Dclose(h_data);
}

/**
 * @brief A helper macro to call the readArrayBackEnd function more easily.
 *
 * @param grp The group from which to read.
 * @param name The name of the array to read.
 * @param type The #DATA_TYPE of the attribute.
 * @param N The number of particles on this MPI task.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param part The array of particles to fill
 * @param N_total Total number of particles
 * @param offset Offset in the array where this task starts reading
 * @param field The name of the field (C code name as defined in part.h) to fill
 * @param importance Is the data compulsory or not
 *
 */
#define readArray(grp, name, type, N, dim, part, N_total, offset, field, \
                  importance)                                            \
  readArrayBackEnd(grp, name, type, N, dim, N_total, offset,             \
                   (char*)(&(part[0]).field), importance)
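
/* Example use (mirroring the calls in read_ic_parallel() below): read the
 * particle positions, a 3-vector per particle, into the 'x' field of the
 * part structure:
 *
 *   readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset,
 *             x, COMPULSORY);
 */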

/**
 * @brief Reads an HDF5 initial condition file (GADGET-3 type) in parallel
 *
 * @param fileName The file to read.
 * @param dim (output) The dimension of the volume read from the file.
 * @param parts (output) The array of #part read from the file.
 * @param N (output) The number of particles read from the file.
 * @param periodic (output) 1 if the volume is periodic, 0 if not.
 * @param mpi_rank The MPI task rank calling the function.
 * @param mpi_size The number of MPI tasks.
 * @param comm The MPI communicator.
 * @param info The MPI information object.
 *
 * Opens the HDF5 file fileName and reads the particles it contains
 * into the parts array. N is the returned number of particles found
 * in the file.
 *
 * @warning Cannot read snapshots distributed over more than one file!
 * @todo Read snapshots distributed in more than one file.
 *
 * Calls #error() if an error occurs.
 *
 */
void read_ic_parallel(char* fileName, double dim[3], struct part** parts,
                      int* N, int* periodic, int mpi_rank, int mpi_size,
                      MPI_Comm comm, MPI_Info info) {
  hid_t h_file = 0, h_grp = 0;
  double boxSize[3] = {
      0.0, -1.0, -1.0}; /* GADGET has only cubic boxes (in cosmological mode) */
  int numParticles[6] = {
      0}; /* GADGET has 6 particle types. We only keep the type 0*/
  int numParticles_highWord[6] = {0};
  long long offset = 0;
  long long N_total = 0;

  /* Open file */
  /* message("Opening file '%s' as IC.", fileName); */
  hid_t h_plist_id = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(h_plist_id, comm, info);
  h_file = H5Fopen(fileName, H5F_ACC_RDONLY, h_plist_id);
  if (h_file < 0) {
    error("Error while opening file '%s'.", fileName);
  }

  /* Open header to read simulation properties */
  /* message("Reading runtime parameters..."); */
  h_grp = H5Gopen1(h_file, "/RuntimePars");
  if (h_grp < 0) error("Error while opening runtime parameters\n");

  /* Read the relevant information */
  readAttribute(h_grp, "PeriodicBoundariesOn", INT, periodic);

  /* Close runtime parameters */
  H5Gclose(h_grp);

  /* Open header to read simulation properties */
  /* message("Reading file header..."); */
  h_grp = H5Gopen1(h_file, "/Header");
  if (h_grp < 0) error("Error while opening file header\n");

  /* Read the relevant information and print status */
  readAttribute(h_grp, "BoxSize", DOUBLE, boxSize);
  readAttribute(h_grp, "NumPart_Total", UINT, numParticles);
  readAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticles_highWord);

  N_total = ((long long)numParticles[0]) +
            ((long long)numParticles_highWord[0] << 32);
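  /* For example, 5000000000 particles are stored as NumPart_Total = 705032704
   * and NumPart_Total_HighWord = 1, since 5000000000 = 1 * 2^32 + 705032704. */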
  dim[0] = boxSize[0];
  dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
  dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];

  /* Divide the particles among the tasks. */
  offset = mpi_rank * N_total / mpi_size;
  *N = (mpi_rank + 1) * N_total / mpi_size - offset;
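  /* For example, with N_total = 10 particles over mpi_size = 4 tasks, ranks
   * 0..3 get offsets {0, 2, 5, 7} and particle counts {2, 3, 2, 3}. */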

  /* message("Found %d particles in a %speriodic box of size [%f %f %f].",  */
  /* 	 *N, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */

  /* Close header */
  H5Gclose(h_grp);

  /* Allocate memory to store particles */
  if (posix_memalign((void*)parts, part_align, *N * sizeof(struct part)) != 0)
    error("Error while allocating memory for particles");
  bzero(*parts, *N * sizeof(struct part));

  /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) /
   * (1024.*1024.)); */

  /* Open SPH particles group */
  /* message("Reading particle arrays..."); */
  h_grp = H5Gopen1(h_file, "/PartType0");
  if (h_grp < 0) error("Error while opening particle group.\n");

  /* Read arrays */
  readArray(h_grp, "Coordinates", DOUBLE, *N, 3, *parts, N_total, offset, x,
            COMPULSORY);
  readArray(h_grp, "Velocities", FLOAT, *N, 3, *parts, N_total, offset, v,
            COMPULSORY);
  readArray(h_grp, "Masses", FLOAT, *N, 1, *parts, N_total, offset, mass,
            COMPULSORY);
  readArray(h_grp, "SmoothingLength", FLOAT, *N, 1, *parts, N_total, offset, h,
            COMPULSORY);
  readArray(h_grp, "InternalEnergy", FLOAT, *N, 1, *parts, N_total, offset, u,
            COMPULSORY);
  readArray(h_grp, "ParticleIDs", ULONGLONG, *N, 1, *parts, N_total, offset, id,
            COMPULSORY);
  readArray(h_grp, "TimeStep", FLOAT, *N, 1, *parts, N_total, offset, dt,
            OPTIONAL);
  readArray(h_grp, "Acceleration", FLOAT, *N, 3, *parts, N_total, offset, a,
            OPTIONAL);
  readArray(h_grp, "Density", FLOAT, *N, 1, *parts, N_total, offset, rho,
            OPTIONAL);

  /* Close particle group */
  H5Gclose(h_grp);

  /* Close property handler */
  H5Pclose(h_plist_id);

  /* Close file */
  H5Fclose(h_file);

  /* message("Done Reading particles..."); */
}

/*-----------------------------------------------------------------------------
 * Routines writing an output file
 *-----------------------------------------------------------------------------*/

/**
 * @brief Writes a data array in given HDF5 group.
 *
 * @param grp The group in which to write.
 * @param fileName The name of the file in which the data is written
 * @param xmfFile The FILE used to write the XMF description
 * @param name The name of the array to write.
 * @param type The #DATA_TYPE of the array.
 * @param N The number of particles to write.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param N_total Total number of particles across all cores
 * @param mpi_rank The MPI task rank calling the function
 * @param offset Offset in the array where this mpi task starts writing
 * @param part_c A (char*) pointer on the first occurrence of the field of
 * interest in the parts array
 * @param us The UnitSystem currently in use
 * @param convFactor The UnitConversionFactor for this array
 *
 * @todo A better version using HDF5 hyperslabs to write the file directly from
 * the part array will be written once the structures have been stabilized.
 *
 * Calls #error() if an error occurs.
 */
void writeArrayBackEnd(hid_t grp, char* fileName, FILE* xmfFile, char* name,
                       enum DATA_TYPE type, int N, int dim, long long N_total,
                       int mpi_rank, long long offset, char* part_c,
                       struct UnitSystem* us,
                       enum UnitConversionFactor convFactor) {
  hid_t h_data = 0, h_err = 0, h_memspace = 0, h_filespace = 0, h_plist_id = 0;
  hsize_t shape[2], shape_total[2], offsets[2];
  void* temp = 0;
  int i = 0, rank = 0;
  const size_t typeSize = sizeOfType(type);
  const size_t copySize = typeSize * dim;
  const size_t partSize = sizeof(struct part);
  char* temp_c = 0;
  char buffer[150];

  /* message("Writing '%s' array...", name); */

  /* Allocate temporary buffer */
  temp = malloc(N * dim * sizeOfType(type));
  if (temp == NULL) error("Unable to allocate memory for temporary buffer");

  /* Copy particle data to temporary buffer */
  temp_c = temp;
  for (i = 0; i < N; ++i)
    memcpy(&temp_c[i * copySize], part_c + i * partSize, copySize);

  /* Create data space */
  h_memspace = H5Screate(H5S_SIMPLE);
  if (h_memspace < 0) {
    error("Error while creating data space (memory) for field '%s'.", name);
  }

  h_filespace = H5Screate(H5S_SIMPLE);
  if (h_filespace < 0) {
    error("Error while creating data space (file) for field '%s'.", name);
  }

  if (dim > 1) {
    rank = 2;
    shape[0] = N;
    shape[1] = dim;
    shape_total[0] = N_total;
    shape_total[1] = dim;
    offsets[0] = offset;
    offsets[1] = 0;
  } else {
    rank = 1;
    shape[0] = N;
    shape[1] = 0;
    shape_total[0] = N_total;
    shape_total[1] = 0;
    offsets[0] = offset;
    offsets[1] = 0;
  }

  /* Change shape of memory data space */
  h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
  if (h_err < 0) {
    error("Error while changing data space (memory) shape for field '%s'.",
          name);
  }

  /* Change shape of file data space */
  h_err = H5Sset_extent_simple(h_filespace, rank, shape_total, NULL);
  if (h_err < 0) {
    error("Error while changing data space (file) shape for field '%s'.", name);
  }

  /* Create dataset */
  h_data = H5Dcreate1(grp, name, hdf5Type(type), h_filespace, H5P_DEFAULT);
  if (h_data < 0) {
    error("Error while creating dataset '%s'.", name);
  }

  H5Sclose(h_filespace);
  h_filespace = H5Dget_space(h_data);
  H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);

  /* Create property list for collective dataset write.    */
  h_plist_id = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(h_plist_id, H5FD_MPIO_COLLECTIVE);

  /* Write temporary buffer to HDF5 dataspace */
  h_err = H5Dwrite(h_data, hdf5Type(type), h_memspace, h_filespace, h_plist_id,
                   temp);
  if (h_err < 0) {
    error("Error while writing data array '%s'.", name);
  }

  /* Write XMF description for this data set */
  if (mpi_rank == 0) writeXMFline(xmfFile, fileName, name, N_total, dim, type);

  /* Write unit conversion factors for this data set */
  conversionString(buffer, us, convFactor);
  writeAttribute_d(h_data, "CGS conversion factor",
                   conversionFactor(us, convFactor));
  writeAttribute_f(h_data, "h-scale exponant", hFactor(us, convFactor));
  writeAttribute_f(h_data, "a-scale exponant", aFactor(us, convFactor));
  writeAttribute_s(h_data, "Conversion factor", buffer);

  /* Free and close everything */
  free(temp);
  H5Dclose(h_data);
  H5Pclose(h_plist_id);
  H5Sclose(h_memspace);
  H5Sclose(h_filespace);
}

/**
 * @brief A helper macro to call the writeArrayBackEnd function more easily.
 *
 * @param grp The group in which to write.
 * @param fileName The name of the file in which the data is written
 * @param xmfFile The FILE used to write the XMF description
 * @param name The name of the array to write.
 * @param type The #DATA_TYPE of the array.
 * @param N The number of particles to write from this core.
 * @param dim The dimension of the data (1 for scalar, 3 for vector)
 * @param N_total Total number of particles across all cores
 * @param mpi_rank The MPI task rank calling the function
 * @param offset Offset in the array where this mpi task starts writing
 * @param part The array of particles to write from
 * @param field The name (code name) of the field to read from.
 * @param us The UnitSystem currently in use
 * @param convFactor The UnitConversionFactor for this array
 *
 */
#define writeArray(grp, fileName, xmfFile, name, type, N, dim, part, N_total, \
                   mpi_rank, offset, field, us, convFactor)                   \
  writeArrayBackEnd(grp, fileName, xmfFile, name, type, N, dim, N_total,      \
                    mpi_rank, offset, (char*)(&(part[0]).field), us,          \
                    convFactor)
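
/* Example use (mirroring the calls in write_output_parallel() below): write
 * the particle positions, a 3-vector per particle, taken from the 'x' field
 * of the part structure:
 *
 *   writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
 *              N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
 */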

/**
 * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
 *
 * @param e The engine containing all the system.
 * @param us The UnitSystem used for the conversion of units in the output
 * @param mpi_rank The MPI task rank calling the function.
 * @param mpi_size The number of MPI tasks.
 * @param comm The MPI communicator.
 * @param info The MPI information object.
 *
 * Creates an HDF5 output file and writes the particles contained
 * in the engine. If such a file already exists, it is erased and replaced
 * by the new one.
 * The companion XMF file is also updated accordingly.
 *
 * Calls #error() if an error occurs.
 *
 */
void write_output_parallel(struct engine* e, struct UnitSystem* us,
                           int mpi_rank, int mpi_size, MPI_Comm comm,
                           MPI_Info info) {

  hid_t h_file = 0, h_grp = 0;
  int N = e->s->nr_parts;
  int periodic = e->s->periodic;
  unsigned int numParticles[6] = {N, 0};
  unsigned int numParticlesHighWord[6] = {0};
  unsigned int flagEntropy[6] = {0};
  long long N_total = 0, offset = 0;
  double offset_d = 0., N_d = 0., N_total_d = 0.;
  int numFiles = 1;
  struct part* parts = e->s->parts;
  FILE* xmfFile = 0;
  static int outputCount = 0;

  /* File name */
  char fileName[200];
  sprintf(fileName, "output_%03i.hdf5", outputCount);

  /* First time, we need to create the XMF file */
  if (outputCount == 0 && mpi_rank == 0) createXMFfile();

  /* Prepare the XMF file for the new entry */
  if (mpi_rank == 0) xmfFile = prepareXMFfile();

  /* Open HDF5 file */
  hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(plist_id, comm, info);
  h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
  if (h_file < 0) {
    error("Error while opening file '%s'.", fileName);
  }

  /* Compute offset in the file and total number of particles */
  /* Done using doubles to allow for up to 2^50 ~ 10^15 particles */
  N_d = (double)N;
  MPI_Exscan(&N_d, &offset_d, 1, MPI_DOUBLE, MPI_SUM, comm);
  N_total_d = offset_d + N_d;
  MPI_Bcast(&N_total_d, 1, MPI_DOUBLE, mpi_size - 1, comm);
  if (N_total_d > 1.e15)
    error(
        "Error while computing the offest for parallel output: Simulation has "
        "more than 10^15 particles.\n");
  N_total = (long long)N_total_d;
  offset = (long long)offset_d;
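  /* As an example of the computation above: with 3 ranks holding N = {4, 2, 3}
   * particles, MPI_Exscan yields offsets {0, 4, 6} (exclusive prefix sums) and
   * offset_d + N_d = 9 on the last rank, which is the global total broadcast
   * from rank mpi_size - 1. */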

  /* Write the part of the XMF file corresponding to this specific output */
  if (mpi_rank == 0) writeXMFheader(xmfFile, N_total, fileName, e->time);

  /* Open header to write simulation properties */
  /* message("Writing runtime parameters..."); */
  h_grp = H5Gcreate1(h_file, "/RuntimePars", 0);
  if (h_grp < 0) error("Error while creating runtime parameters group\n");

  /* Write the relevant information */
  writeAttribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);

  /* Close runtime parameters */
  H5Gclose(h_grp);

  /* Open header to write simulation properties */
  /* message("Writing file header..."); */
  h_grp = H5Gcreate1(h_file, "/Header", 0);
  if (h_grp < 0) error("Error while creating file header\n");

  /* Print the relevant information and print status */
  writeAttribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
  writeAttribute(h_grp, "NumPart_ThisFile", UINT, numParticles, 6);
  writeAttribute(h_grp, "Time", DOUBLE, &e->time, 1);

  /* GADGET-2 legacy values */
  numParticles[0] = (unsigned int)N_total;
  writeAttribute(h_grp, "NumPart_Total", UINT, numParticles, 6);
  numParticlesHighWord[0] = (unsigned int)(N_total >> 32);
  writeAttribute(h_grp, "NumPart_Total_HighWord", UINT, numParticlesHighWord,
                 6);
  double MassTable[6] = {0., 0., 0., 0., 0., 0.};
  writeAttribute(h_grp, "MassTable", DOUBLE, MassTable, 6);
  writeAttribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy, 6);
  writeAttribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);

  /* Close header */
  H5Gclose(h_grp);

  /* Print the SPH parameters */
  writeSPHflavour(h_file);

  /* Print the system of Units */
  writeUnitSystem(h_file, us);

  /* Create SPH particles group */
  /* message("Writing particle arrays..."); */
  h_grp = H5Gcreate1(h_file, "/PartType0", 0);
  if (h_grp < 0) error("Error while creating particle group.\n");

  /* Write arrays */
  writeArray(h_grp, fileName, xmfFile, "Coordinates", DOUBLE, N, 3, parts,
             N_total, mpi_rank, offset, x, us, UNIT_CONV_LENGTH);
  writeArray(h_grp, fileName, xmfFile, "Velocities", FLOAT, N, 3, parts,
             N_total, mpi_rank, offset, v, us, UNIT_CONV_SPEED);
  writeArray(h_grp, fileName, xmfFile, "Masses", FLOAT, N, 1, parts, N_total,
             mpi_rank, offset, mass, us, UNIT_CONV_MASS);
  writeArray(h_grp, fileName, xmfFile, "SmoothingLength", FLOAT, N, 1, parts,
             N_total, mpi_rank, offset, h, us, UNIT_CONV_LENGTH);
  writeArray(h_grp, fileName, xmfFile, "InternalEnergy", FLOAT, N, 1, parts,
             N_total, mpi_rank, offset, u, us, UNIT_CONV_ENERGY_PER_UNIT_MASS);
  writeArray(h_grp, fileName, xmfFile, "ParticleIDs", ULONGLONG, N, 1, parts,
             N_total, mpi_rank, offset, id, us, UNIT_CONV_NO_UNITS);
  writeArray(h_grp, fileName, xmfFile, "TimeStep", FLOAT, N, 1, parts, N_total,
             mpi_rank, offset, dt, us, UNIT_CONV_TIME);
  writeArray(h_grp, fileName, xmfFile, "Acceleration", FLOAT, N, 3, parts,
             N_total, mpi_rank, offset, a, us, UNIT_CONV_ACCELERATION);
  writeArray(h_grp, fileName, xmfFile, "Density", FLOAT, N, 1, parts, N_total,
             mpi_rank, offset, rho, us, UNIT_CONV_DENSITY);

  /* Close particle group */
  H5Gclose(h_grp);

  /* Write XMF file descriptor */
  if (mpi_rank == 0) writeXMFfooter(xmfFile);

  /* message("Done writing particles..."); */

  /* Close property descriptor */
  H5Pclose(plist_id);

  /* Close file */
  H5Fclose(h_file);

  ++outputCount;
}

#endif /* HAVE_HDF5 && WITH_MPI && HAVE_PARALLEL_HDF5 */