/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2012 Pedro Gonnet (pedro.gonnet@durham.ac.uk),
 *                    Matthieu Schaller (matthieu.schaller@durham.ac.uk).
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

#if defined(HAVE_HDF5) && defined(WITH_MPI) && !defined(HAVE_PARALLEL_HDF5)

/* Some standard headers. */
#include <hdf5.h>
#include <math.h>
#include <mpi.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* This object's header. */
#include "serial_io.h"

/* Local includes. */
#include "common_io.h"
#include "dimension.h"
#include "engine.h"
#include "error.h"
#include "gravity_io.h"
#include "gravity_properties.h"
#include "hydro_io.h"
#include "hydro_properties.h"
#include "io_properties.h"
#include "kernel_hydro.h"
#include "part.h"
#include "stars_io.h"
#include "units.h"
#include "xmf.h"

/*-----------------------------------------------------------------------------
 * Routines reading an IC file
 *-----------------------------------------------------------------------------*/

/**
 * @brief Reads a data array from a given HDF5 group.
 *
 * @param grp The group from which to read.
 * @param props The #io_props of the field to read.
 * @param N The number of particles to read on this rank.
 * @param N_total The total number of particles on all ranks.
 * @param offset The offset position where this rank starts reading.
 * @param internal_units The #unit_system used internally.
 * @param ic_units The #unit_system used in the ICs.
 *
 * @todo A better version using HDF5 hyper-slabs to read the file directly into
 * the part array will be written once the structures have been stabilized.
 */
void readArray(hid_t grp, const struct io_props props, size_t N,
               long long N_total, long long offset,
               const struct unit_system* internal_units,
               const struct unit_system* ic_units) {

  const size_t typeSize = io_sizeof_type(props.type);
  const size_t copySize = typeSize * props.dimension;
  const size_t num_elements = N * props.dimension;

  /* Check whether the data set exists or not */
  const htri_t exist = H5Lexists(grp, props.name, 0);
  if (exist < 0) {
    error("Error while checking the existence of data set '%s'.", props.name);
  } else if (exist == 0) {
    if (props.importance == COMPULSORY) {
      error("Compulsory data set '%s' not present in the file.", props.name);
    } else {
      for (size_t i = 0; i < N; ++i)
        memset(props.field + i * props.partSize, 0, copySize);
      return;
    }
  }

  /* message( "Reading %s '%s' array...", importance == COMPULSORY ? */
  /*          "compulsory": "optional  ", name); */
  /* fflush(stdout); */

  /* Open data set */
  const hid_t h_data = H5Dopen(grp, props.name, H5P_DEFAULT);
  if (h_data < 0) error("Error while opening data set '%s'.", props.name);

  /* Check data type */
  const hid_t h_type = H5Dget_type(h_data);
  if (h_type < 0) error("Unable to retrieve data type from the file");
  /* if (!H5Tequal(h_type, hdf5_type(type))) */
  /*   error("Non-matching types between the code and the file"); */

  /* Allocate temporary buffer */
  void* temp = malloc(num_elements * typeSize);
  if (temp == NULL) error("Unable to allocate memory for temporary buffer");

  /* Prepare information for hyper-slab */
  hsize_t shape[2], offsets[2];
  int rank;
  if (props.dimension > 1) {
    rank = 2;
    shape[0] = N;
    shape[1] = props.dimension;
    offsets[0] = offset;
    offsets[1] = 0;
  } else {
    rank = 2;
    shape[0] = N;
    shape[1] = 1;
    offsets[0] = offset;
    offsets[1] = 0;
  }

  /* Create data space in memory */
  const hid_t h_memspace = H5Screate_simple(rank, shape, NULL);

  /* Select hyper-slab in file */
  const hid_t h_filespace = H5Dget_space(h_data);
  H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);

  /* Read the HDF5 data set into the temporary buffer */
  /* Dirty version that happens to work for vectors but should be improved */
  /* Using HDF5 dataspaces would be better */
  const hid_t h_err = H5Dread(h_data, io_hdf5_type(props.type), h_memspace,
                              h_filespace, H5P_DEFAULT, temp);
  if (h_err < 0) {
    error("Error while reading data array '%s'.", props.name);
  }

  /* Unit conversion if necessary */
  const double factor =
      units_conversion_factor(ic_units, internal_units, props.units);
  if (factor != 1. && exist != 0) {

    /* message("Converting ! factor=%e", factor); */

    if (io_is_double_precision(props.type)) {
      double* temp_d = temp;
      for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
    } else {
      float* temp_f = temp;
      for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
    }
  }

  /* Copy temporary buffer to particle data */
  char* temp_c = temp;
  for (size_t i = 0; i < N; ++i)
    memcpy(props.field + i * props.partSize, &temp_c[i * copySize], copySize);

  /* Free and close everything */
  free(temp);
  H5Sclose(h_filespace);
  H5Sclose(h_memspace);
  H5Tclose(h_type);
  H5Dclose(h_data);
}
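/* readArray() is called once per field and per rank from read_ic_serial()
 * below, e.g. (sketch):
 *   readArray(h_grp, list[i], Nparticles, N_total[ptype], offset[ptype],
 *             internal_units, ic_units);
 * where list[] has been filled by the *_read_particles() helpers. */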

/*-----------------------------------------------------------------------------
 * Routines writing an output file
 *-----------------------------------------------------------------------------*/
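/**
 * @brief Prepares (creates) a dataset in the snapshot file for a given field.
 *
 * Only called on rank 0: the dataset is created with its final (total) size
 * and the per-rank hyper-slabs are written later by writeArray().
 *
 * @param e The #engine we are writing from.
 * @param grp The group in which to create the dataset.
 * @param fileName The name of the file in which the data is written.
 * @param xmfFile The FILE used to write the XMF description.
 * @param partTypeGroupName The name of the group containing the particles in
 * the HDF5 file.
 * @param props The #io_props of the field to write.
 * @param N_total The total number of particles to write across all ranks.
 * @param internal_units The #unit_system used internally.
 * @param snapshot_units The #unit_system used in the snapshots.
 */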
void prepareArray(const struct engine* e, hid_t grp, char* fileName,
                  FILE* xmfFile, char* partTypeGroupName,
                  const struct io_props props, unsigned long long N_total,
                  const struct unit_system* internal_units,
                  const struct unit_system* snapshot_units) {

  /* Create data space */
  const hid_t h_space = H5Screate(H5S_SIMPLE);
  if (h_space < 0) {
    error("Error while creating data space for field '%s'.", props.name);
  }

  int rank = 0;
  hsize_t shape[2];
  hsize_t chunk_shape[2];
  if (props.dimension > 1) {
    rank = 2;
    shape[0] = N_total;
    shape[1] = props.dimension;
    chunk_shape[0] = 1 << 16; /* Just a guess... */
    chunk_shape[1] = props.dimension;
  } else {
    rank = 1;
    shape[0] = N_total;
    shape[1] = 0;
    chunk_shape[0] = 1 << 16; /* Just a guess... */
    chunk_shape[1] = 0;
  }

  /* Make sure the chunks are not larger than the dataset */
  if (chunk_shape[0] > N_total) chunk_shape[0] = N_total;

  /* Change shape of data space */
  hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
  if (h_err < 0) {
    error("Error while changing data space shape for field '%s'.", props.name);
  }

  /* Dataset properties */
  const hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);

  /* Set chunk size */
  h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
  if (h_err < 0) {
    error("Error while setting chunk size (%llu, %llu) for field '%s'.",
          chunk_shape[0], chunk_shape[1], props.name);
  }

  /* Impose data compression */
  if (e->snapshotCompression > 0) {
    h_err = H5Pset_deflate(h_prop, e->snapshotCompression);
    if (h_err < 0) {
      error("Error while setting compression options for field '%s'.",
            props.name);
    }
  }

  /* Create dataset */
  const hid_t h_data = H5Dcreate(grp, props.name, io_hdf5_type(props.type),
                                 h_space, H5P_DEFAULT, h_prop, H5P_DEFAULT);
  if (h_data < 0) {
    error("Error while creating data set '%s'.", props.name);
  }

  /* Write XMF description for this data set */
  xmf_write_line(xmfFile, fileName, partTypeGroupName, props.name, N_total,
                 props.dimension, props.type);

  /* Write unit conversion factors for this data set */
  char buffer[FIELD_BUFFER_SIZE];
  units_cgs_conversion_string(buffer, snapshot_units, props.units);
  io_write_attribute_d(
      h_data, "CGS conversion factor",
      units_cgs_conversion_factor(snapshot_units, props.units));
  io_write_attribute_f(h_data, "h-scale exponent",
                       units_h_factor(snapshot_units, props.units));
  io_write_attribute_f(h_data, "a-scale exponent",
                       units_a_factor(snapshot_units, props.units));
  io_write_attribute_s(h_data, "Conversion factor", buffer);

  /* Close everything */
  H5Pclose(h_prop);
  H5Dclose(h_data);
  H5Sclose(h_space);
}

/**
 * @brief Writes a data array in given HDF5 group.
 *
 * @param e The #engine we are writing from.
 * @param grp The group in which to write.
 * @param fileName The name of the file in which the data is written.
 * @param xmfFile The FILE used to write the XMF description.
 * @param partTypeGroupName The name of the group containing the particles in
 * the HDF5 file.
 * @param props The #io_props of the field to write.
 * @param N The number of particles to write.
 * @param N_total The total number of particles on all ranks.
 * @param offset The offset position where this rank starts writing.
 * @param mpi_rank The MPI rank of this node.
 * @param internal_units The #unit_system used internally.
 * @param snapshot_units The #unit_system used in the snapshots.
 *
 * @todo A better version using HDF5 hyper-slabs to write the file directly
 * from the part array will be written once the structures have been
 * stabilized.
 */
void writeArray(const struct engine* e, hid_t grp, char* fileName,
                FILE* xmfFile, char* partTypeGroupName,
                const struct io_props props, size_t N, long long N_total,
                int mpi_rank, long long offset,
                const struct unit_system* internal_units,
                const struct unit_system* snapshot_units) {

  const size_t typeSize = io_sizeof_type(props.type);
  const size_t num_elements = N * props.dimension;

  /* message("Writing '%s' array...", props.name); */

  /* Prepare the arrays in the file */
  if (mpi_rank == 0)
    prepareArray(e, grp, fileName, xmfFile, partTypeGroupName, props, N_total,
                 internal_units, snapshot_units);

  /* Allocate temporary buffer */
  void* temp = NULL;
  if (posix_memalign((void**)&temp, IO_BUFFER_ALIGNMENT,
                     num_elements * typeSize) != 0)
    error("Unable to allocate temporary i/o buffer");

  /* Copy the particle data to the temporary buffer */
  io_copy_temp_buffer(temp, e, props, N, internal_units, snapshot_units);

  /* Construct information for the hyper-slab */
  int rank;
  hsize_t shape[2];
  hsize_t offsets[2];
  if (props.dimension > 1) {
    rank = 2;
    shape[0] = N;
    shape[1] = props.dimension;
    offsets[0] = offset;
    offsets[1] = 0;
  } else {
    rank = 1;
    shape[0] = N;
    shape[1] = 0;
    offsets[0] = offset;
    offsets[1] = 0;
  }

  /* Create data space in memory */
  const hid_t h_memspace = H5Screate(H5S_SIMPLE);
  if (h_memspace < 0)
    error("Error while creating data space (memory) for field '%s'.",
          props.name);

  /* Change shape of memory data space */
  hid_t h_err = H5Sset_extent_simple(h_memspace, rank, shape, NULL);
  if (h_err < 0)
    error("Error while changing data space (memory) shape for field '%s'.",
          props.name);

  /* Open pre-existing data set */
  const hid_t h_data = H5Dopen(grp, props.name, H5P_DEFAULT);
  if (h_data < 0) error("Error while opening dataset '%s'.", props.name);

  /* Select the hyper-slab of that data set corresponding to this rank */
  const hid_t h_filespace = H5Dget_space(h_data);
  H5Sselect_hyperslab(h_filespace, H5S_SELECT_SET, offsets, NULL, shape, NULL);

  /* Write temporary buffer to the HDF5 data set */
  h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace,
                   H5P_DEFAULT, temp);
  if (h_err < 0) error("Error while writing data array '%s'.", props.name);

  /* Free and close everything */
  free(temp);
  H5Dclose(h_data);
  H5Sclose(h_memspace);
  H5Sclose(h_filespace);
}
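/* Note on the write pattern: rank 0 first creates the chunked (and optionally
 * compressed) datasets via prepareArray(); every rank then re-opens the file
 * in turn and writes its own hyper-slab. This is the serial fallback used
 * when HDF5 has no parallel support (see the guard at the top of this file).
 */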

/**
 * @brief Reads an HDF5 initial condition file (GADGET-3 type)
 *
 * @param fileName The file to read.
 * @param internal_units The system units used internally.
 * @param dim (output) The dimension of the volume read from the file.
 * @param parts (output) The array of #part (gas particles) read from the file.
 * @param gparts (output) The array of #gpart read from the file.
 * @param sparts (output) The array of #spart read from the file.
 * @param Ngas (output) The number of #part read from the file on that node.
 * @param Ngparts (output) The number of #gpart read from the file on that
 * node.
 * @param Nstars (output) The number of #spart read from the file on that node.
 * @param periodic (output) 1 if the volume is periodic, 0 if not.
 * @param flag_entropy (output) 1 if the ICs contained Entropy in the
 * InternalEnergy field.
 * @param with_hydro Are we reading gas particles?
 * @param with_gravity Are we reading/creating #gpart arrays?
 * @param with_stars Are we reading star particles?
 * @param mpi_rank The MPI rank of this node.
 * @param mpi_size The number of MPI ranks.
 * @param comm The MPI communicator.
 * @param info The MPI information object.
 * @param n_threads The number of threads to use for local operations.
 * @param dry_run If 1, don't read the particles; only allocate the arrays.
 *
 * Opens the HDF5 file fileName and reads the particles contained in it into
 * the parts array. N is the returned number of particles found in the file.
 *
 * @warning Cannot read snapshots distributed over more than one file!
 * @todo Read snapshots distributed in more than one file.
 *
 */
void read_ic_serial(char* fileName, const struct unit_system* internal_units,
                    double dim[3], struct part** parts, struct gpart** gparts,
                    struct spart** sparts, size_t* Ngas, size_t* Ngparts,
                    size_t* Nstars, int* periodic, int* flag_entropy,
                    int with_hydro, int with_gravity, int with_stars,
                    int mpi_rank, int mpi_size, MPI_Comm comm, MPI_Info info,
                    int n_threads, int dry_run) {

  hid_t h_file = 0, h_grp = 0;
  /* GADGET has only cubic boxes (in cosmological mode) */
  double boxSize[3] = {0.0, -1.0, -1.0};
  /* GADGET has 6 particle types. We only keep the type 0 & 1 for now */
  long long numParticles[swift_type_count] = {0};
  long long numParticles_highWord[swift_type_count] = {0};
  size_t N[swift_type_count] = {0};
  long long N_total[swift_type_count] = {0};
  long long offset[swift_type_count] = {0};
  int dimension = 3; /* Assume 3D if nothing is specified */
  size_t Ndm = 0;
  struct unit_system* ic_units = malloc(sizeof(struct unit_system));

  /* First read some information about the content */
  if (mpi_rank == 0) {

    /* Open file */
    /* message("Opening file '%s' as IC.", fileName); */
    h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
    if (h_file < 0)
      error("Error while opening file '%s' for initial read.", fileName);

    /* Open header to read simulation properties */
    /* message("Reading runtime parameters..."); */
    h_grp = H5Gopen(h_file, "/RuntimePars", H5P_DEFAULT);
    if (h_grp < 0) error("Error while opening runtime parameters\n");

    /* Read the relevant information */
    io_read_attribute(h_grp, "PeriodicBoundariesOn", INT, periodic);

    /* Close runtime parameters */
    H5Gclose(h_grp);

    /* Open header to read simulation properties */
    /* message("Reading file header..."); */
    h_grp = H5Gopen(h_file, "/Header", H5P_DEFAULT);
    if (h_grp < 0) error("Error while opening file header\n");

    /* Check the dimensionality of the ICs (if the info exists) */
    const hid_t hid_dim = H5Aexists(h_grp, "Dimension");
    if (hid_dim < 0)
      error("Error while testing existence of 'Dimension' attribute");
    if (hid_dim > 0) io_read_attribute(h_grp, "Dimension", INT, &dimension);
    if (dimension != hydro_dimension)
      error("ICs dimensionality (%dD) does not match code dimensionality (%dD)",
            dimension, (int)hydro_dimension);

    /* Read the relevant information and print status */
    int flag_entropy_temp[6];
    io_read_attribute(h_grp, "Flag_Entropy_ICs", INT, flag_entropy_temp);
    *flag_entropy = flag_entropy_temp[0];
    io_read_attribute(h_grp, "BoxSize", DOUBLE, boxSize);
    io_read_attribute(h_grp, "NumPart_Total", LONGLONG, numParticles);
    io_read_attribute(h_grp, "NumPart_Total_HighWord", LONGLONG,
                      numParticles_highWord);

    for (int ptype = 0; ptype < swift_type_count; ++ptype)
      N_total[ptype] =
          (numParticles[ptype]) + (numParticles_highWord[ptype] << 32);

    /* Get the box size if not cubic */
    dim[0] = boxSize[0];
    dim[1] = (boxSize[1] < 0) ? boxSize[0] : boxSize[1];
    dim[2] = (boxSize[2] < 0) ? boxSize[0] : boxSize[2];

    /* Change box size in the 1D and 2D case */
    if (hydro_dimension == 2)
      dim[2] = min(dim[0], dim[1]);
    else if (hydro_dimension == 1)
      dim[2] = dim[1] = dim[0];

    /* message("Found %lld particles in a %speriodic box of size [%f %f %f].",
     */
    /*         N_total, (periodic ? "": "non-"), dim[0], dim[1], dim[2]); */

    fflush(stdout);

    /* Close header */
    H5Gclose(h_grp);

    /* Read the unit system used in the ICs */
    if (ic_units == NULL) error("Unable to allocate memory for IC unit system");
    io_read_unit_system(h_file, ic_units, mpi_rank);

    if (units_are_equal(ic_units, internal_units)) {

      message("IC and internal units match. No conversion needed.");

    } else {

      message("Conversion needed from:");
      message("(ICs) Unit system: U_M =      %e g.", ic_units->UnitMass_in_cgs);
      message("(ICs) Unit system: U_L =      %e cm.",
              ic_units->UnitLength_in_cgs);
      message("(ICs) Unit system: U_t =      %e s.", ic_units->UnitTime_in_cgs);
      message("(ICs) Unit system: U_I =      %e A.",
              ic_units->UnitCurrent_in_cgs);
      message("(ICs) Unit system: U_T =      %e K.",
              ic_units->UnitTemperature_in_cgs);
      message("to:");
      message("(internal) Unit system: U_M = %e g.",
              internal_units->UnitMass_in_cgs);
      message("(internal) Unit system: U_L = %e cm.",
              internal_units->UnitLength_in_cgs);
      message("(internal) Unit system: U_t = %e s.",
              internal_units->UnitTime_in_cgs);
      message("(internal) Unit system: U_I = %e A.",
              internal_units->UnitCurrent_in_cgs);
      message("(internal) Unit system: U_T = %e K.",
              internal_units->UnitTemperature_in_cgs);
    }

    /* Close file */
    H5Fclose(h_file);
  }

  /* Convert the dimensions of the box */
  for (int j = 0; j < 3; j++)
    dim[j] *=
        units_conversion_factor(ic_units, internal_units, UNIT_CONV_LENGTH);

  /* Now need to broadcast that information to all ranks. */
  MPI_Bcast(flag_entropy, 1, MPI_INT, 0, comm);
  MPI_Bcast(periodic, 1, MPI_INT, 0, comm);
  MPI_Bcast(&N_total, swift_type_count, MPI_LONG_LONG_INT, 0, comm);
  MPI_Bcast(dim, 3, MPI_DOUBLE, 0, comm);
  MPI_Bcast(ic_units, sizeof(struct unit_system), MPI_BYTE, 0, comm);

  /* Divide the particles among the tasks. */
  for (int ptype = 0; ptype < swift_type_count; ++ptype) {
    offset[ptype] = mpi_rank * N_total[ptype] / mpi_size;
    N[ptype] = (mpi_rank + 1) * N_total[ptype] / mpi_size - offset[ptype];
  }
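  /* For example, with N_total[ptype] = 10 and mpi_size = 3, the ranks get
   * offsets 0, 3, 6 and local counts 3, 3, 4. */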

  /* Allocate memory to store SPH particles */
  if (with_hydro) {
    *Ngas = N[0];
    if (posix_memalign((void*)parts, part_align, *Ngas * sizeof(struct part)) !=
        0)
      error("Error while allocating memory for SPH particles");
    bzero(*parts, *Ngas * sizeof(struct part));
  }

  /* Allocate memory to store star particles */
  if (with_stars) {
    *Nstars = N[swift_type_star];
    if (posix_memalign((void*)sparts, spart_align,
                       *Nstars * sizeof(struct spart)) != 0)
      error("Error while allocating memory for star particles");
    bzero(*sparts, *Nstars * sizeof(struct spart));
  }

  /* Allocate memory to store all gravity particles */
  if (with_gravity) {
    Ndm = N[1];
    *Ngparts = (with_hydro ? N[swift_type_gas] : 0) +
               N[swift_type_dark_matter] +
               (with_stars ? N[swift_type_star] : 0);
    if (posix_memalign((void*)gparts, gpart_align,
                       *Ngparts * sizeof(struct gpart)) != 0)
      error("Error while allocating memory for gravity particles");
    bzero(*gparts, *Ngparts * sizeof(struct gpart));
  }

  /* message("Allocated %8.2f MB for particles.", *N * sizeof(struct part) / */
  /*         (1024.*1024.)); */
  /* message("BoxSize = %lf", dim[0]); */
  /* message("NumPart = [%zd, %zd] Total = %zd", *Ngas, Ndm, *Ngparts); */

  /* For dry runs, only need to do this on rank 0 */
  if (dry_run) mpi_size = 1;

  /* Now loop over ranks and read the data. Without parallel HDF5, only one
   * rank accesses the file at a time. */
  for (int rank = 0; rank < mpi_size; ++rank) {

    /* Is it this rank's turn to read ? */
    if (rank == mpi_rank) {
      h_file = H5Fopen(fileName, H5F_ACC_RDONLY, H5P_DEFAULT);
      if (h_file < 0)
        error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);

      /* Loop over all particle types */
      for (int ptype = 0; ptype < swift_type_count; ptype++) {

        /* Don't do anything if no particle of this kind */
        if (N[ptype] == 0) continue;

        /* Open the particle group in the file */
        char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
        snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
                 ptype);
        h_grp = H5Gopen(h_file, partTypeGroupName, H5P_DEFAULT);
        if (h_grp < 0) {
          error("Error while opening particle group %s.", partTypeGroupName);
        }

        int num_fields = 0;
        struct io_props list[100];
        size_t Nparticles = 0;

        /* Read particle fields into the particle structure */
        switch (ptype) {

          case swift_type_gas:
            if (with_hydro) {
              Nparticles = *Ngas;
              hydro_read_particles(*parts, list, &num_fields);
            }
            break;

          case swift_type_dark_matter:
            if (with_gravity) {
              Nparticles = Ndm;
              darkmatter_read_particles(*gparts, list, &num_fields);
            }
            break;

          case swift_type_star:
            if (with_stars) {
              Nparticles = *Nstars;
              star_read_particles(*sparts, list, &num_fields);
            }
            break;

          default:
            if (mpi_rank == 0)
              message("Particle Type %d not yet supported. Particles ignored",
                      ptype);
        }

        /* Read everything */
        if (!dry_run)
          for (int i = 0; i < num_fields; ++i)
            readArray(h_grp, list[i], Nparticles, N_total[ptype], offset[ptype],
                      internal_units, ic_units);

        /* Close particle group */
        H5Gclose(h_grp);
      }

      /* Close file */
      H5Fclose(h_file);
    }

    /* Wait for this rank's read to complete before the next rank starts */
    MPI_Barrier(comm);
  }

  /* Duplicate the parts for gravity */
  if (!dry_run && with_gravity) {

    /* Let's initialise a bit of thread parallelism here */
    struct threadpool tp;
    threadpool_init(&tp, n_threads);

    /* Prepare the DM particles */
    io_prepare_dm_gparts(&tp, *gparts, Ndm);

    /* Duplicate the hydro particles into gparts */
    if (with_hydro) io_duplicate_hydro_gparts(&tp, *parts, *gparts, *Ngas, Ndm);

    /* Duplicate the star particles into gparts */
    if (with_stars)
      io_duplicate_star_gparts(&tp, *sparts, *gparts, *Nstars, Ndm + *Ngas);

    threadpool_clean(&tp);
  }

  /* message("Done Reading particles..."); */

  /* Clean up */
  free(ic_units);
}

/**
 * @brief Writes an HDF5 output file (GADGET-3 type) with its XMF descriptor
 *
 * @param e The engine containing all the system.
 * @param baseName The common part of the snapshot file name.
 * @param internal_units The #unit_system used internally.
 * @param snapshot_units The #unit_system used in the snapshots.
 * @param mpi_rank The MPI rank of this node.
 * @param mpi_size The number of MPI ranks.
 * @param comm The MPI communicator.
 * @param info The MPI information object.
 *
 * Creates an HDF5 output file and writes the particles contained
 * in the engine. If such a file already exists, it is erased and replaced
 * by the new one.
 * The companion XMF file is also updated accordingly.
 *
 * Calls #error() if an error occurs.
 *
 */
void write_output_serial(struct engine* e, const char* baseName,
                         const struct unit_system* internal_units,
                         const struct unit_system* snapshot_units, int mpi_rank,
                         int mpi_size, MPI_Comm comm, MPI_Info info) {

  hid_t h_file = 0, h_grp = 0;
  const size_t Ngas = e->s->nr_parts;
  const size_t Nstars = e->s->nr_sparts;
  const size_t Ntot = e->s->nr_gparts;
  int periodic = e->s->periodic;
  int numFiles = 1;
  struct part* parts = e->s->parts;
  struct gpart* gparts = e->s->gparts;
  struct gpart* dmparts = NULL;
  struct spart* sparts = e->s->sparts;
  static int outputCount = 0;
  FILE* xmfFile = 0;

  /* Number of unassociated gparts */
  const size_t Ndm = Ntot > 0 ? Ntot - (Ngas + Nstars) : 0;

  /* File name */
  char fileName[FILENAME_BUFFER_SIZE];
  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
           outputCount);

  /* Compute offset in the file and total number of particles */
  size_t N[swift_type_count] = {Ngas, Ndm, 0, 0, Nstars, 0};
  long long N_total[swift_type_count] = {0};
  long long offset[swift_type_count] = {0};
  MPI_Exscan(&N, &offset, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
  for (int ptype = 0; ptype < swift_type_count; ++ptype)
    N_total[ptype] = offset[ptype] + N[ptype];

  /* The last rank now has the correct N_total. Let's broadcast from there */
  MPI_Bcast(&N_total, 6, MPI_LONG_LONG_INT, mpi_size - 1, comm);
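  /* Example: if three ranks hold 2, 3 and 4 particles of a type, the
   * exclusive scan gives offsets 0, 2 and 5; the last rank computes
   * N_total = 9, which is then broadcast to all. */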

  /* Now everybody knows its offset and the total number of particles of each
   * type */

  /* Do common stuff first */
  if (mpi_rank == 0) {

    /* First time, we need to create the XMF file */
    if (outputCount == 0) xmf_create_file(baseName);

    /* Prepare the XMF file for the new entry */
    xmfFile = xmf_prepare_file(baseName);

    /* Write the part corresponding to this specific output */
    xmf_write_outputheader(xmfFile, fileName, e->time);

    /* Open file */
    /* message("Opening file '%s'.", fileName); */
    h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if (h_file < 0) {
      error("Error while opening file '%s'.", fileName);
    }

    /* Open header to write simulation properties */
    /* message("Writing runtime parameters..."); */
    h_grp = H5Gcreate(h_file, "/RuntimePars", H5P_DEFAULT, H5P_DEFAULT,
                      H5P_DEFAULT);
    if (h_grp < 0) error("Error while creating runtime parameters group\n");

    /* Write the relevant information */
    io_write_attribute(h_grp, "PeriodicBoundariesOn", INT, &periodic, 1);

    /* Close runtime parameters */
    H5Gclose(h_grp);

    /* Open header to write simulation properties */
    /* message("Writing file header..."); */
    h_grp = H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    if (h_grp < 0) error("Error while creating file header\n");

    /* Write the relevant information and print status */
    io_write_attribute(h_grp, "BoxSize", DOUBLE, e->s->dim, 3);
    double dblTime = e->time;
    io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
    int dimension = (int)hydro_dimension;
    io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);

    /* GADGET-2 legacy values */
    /* Number of particles of each type */
    unsigned int numParticles[swift_type_count] = {0};
    unsigned int numParticlesHighWord[swift_type_count] = {0};
    for (int ptype = 0; ptype < swift_type_count; ++ptype) {
      numParticles[ptype] = (unsigned int)N_total[ptype];
      numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
    }
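    /* N_total is 64-bit; the GADGET-style header stores it as two 32-bit
     * words, so the casts above keep the low word and the shifted high word
     * separately. */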
    io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, N_total,
                       swift_type_count);
    io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
                       swift_type_count);
    io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT,
                       numParticlesHighWord, swift_type_count);
    double MassTable[6] = {0., 0., 0., 0., 0., 0.};
    io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count);
    unsigned int flagEntropy[swift_type_count] = {0};
    flagEntropy[0] = writeEntropyFlag();
    io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy,
                       swift_type_count);
    io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);

    /* Close header */
    H5Gclose(h_grp);

    /* Print the code version */
    io_write_code_description(h_file);

    /* Print the SPH parameters */
    if (e->policy & engine_policy_hydro) {
      h_grp = H5Gcreate(h_file, "/HydroScheme", H5P_DEFAULT, H5P_DEFAULT,
                        H5P_DEFAULT);
      if (h_grp < 0) error("Error while creating SPH group");
      hydro_props_print_snapshot(h_grp, e->hydro_properties);
      writeSPHflavour(h_grp);
      H5Gclose(h_grp);
    }

    /* Print the gravity parameters */
    if (e->policy & engine_policy_self_gravity) {
      h_grp = H5Gcreate(h_file, "/GravityScheme", H5P_DEFAULT, H5P_DEFAULT,
                        H5P_DEFAULT);
      if (h_grp < 0) error("Error while creating gravity group");
      gravity_props_print_snapshot(h_grp, e->gravity_properties);
      H5Gclose(h_grp);
    }

    /* Print the runtime parameters */
    h_grp =
        H5Gcreate(h_file, "/Parameters", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    if (h_grp < 0) error("Error while creating parameters group");
    parser_write_params_to_hdf5(e->parameter_file, h_grp);
    H5Gclose(h_grp);

    /* Print the system of Units used in the snapshot */
    io_write_unit_system(h_file, snapshot_units, "Units");

    /* Print the system of Units used internally */
    io_write_unit_system(h_file, internal_units, "InternalCodeUnits");

    /* Tell the user if a conversion will be needed */
    if (e->verbose) {
      if (units_are_equal(snapshot_units, internal_units)) {

        message("Snapshot and internal units match. No conversion needed.");

      } else {

        message("Conversion needed from:");
        message("(Snapshot) Unit system: U_M =      %e g.",
                snapshot_units->UnitMass_in_cgs);
        message("(Snapshot) Unit system: U_L =      %e cm.",
                snapshot_units->UnitLength_in_cgs);
        message("(Snapshot) Unit system: U_t =      %e s.",
                snapshot_units->UnitTime_in_cgs);
        message("(Snapshot) Unit system: U_I =      %e A.",
                snapshot_units->UnitCurrent_in_cgs);
        message("(Snapshot) Unit system: U_T =      %e K.",
                snapshot_units->UnitTemperature_in_cgs);
        message("to:");
        message("(internal) Unit system: U_M = %e g.",
                internal_units->UnitMass_in_cgs);
        message("(internal) Unit system: U_L = %e cm.",
                internal_units->UnitLength_in_cgs);
        message("(internal) Unit system: U_t = %e s.",
                internal_units->UnitTime_in_cgs);
        message("(internal) Unit system: U_I = %e A.",
                internal_units->UnitCurrent_in_cgs);
        message("(internal) Unit system: U_T = %e K.",
                internal_units->UnitTemperature_in_cgs);
      }
    }

    /* Loop over all particle types */
    for (int ptype = 0; ptype < swift_type_count; ptype++) {

      /* Don't do anything if no particle of this kind */
      if (N_total[ptype] == 0) continue;

      /* Open the particle group in the file */
      char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
      snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
               ptype);
      h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
                        H5P_DEFAULT);
      if (h_grp < 0) {
        error("Error while creating particle group.\n");
      }

      /* Close particle group */
      H5Gclose(h_grp);
    }

    /* Close file */
    H5Fclose(h_file);
  }

  /* Now loop over ranks and write the data */
  for (int rank = 0; rank < mpi_size; ++rank) {

    /* Is it this rank's turn to write ? */
    if (rank == mpi_rank) {

      h_file = H5Fopen(fileName, H5F_ACC_RDWR, H5P_DEFAULT);
      if (h_file < 0)
        error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);