/*******************************************************************************
* This file is part of SWIFT.
* Copyright (c) 2019 Matthieu Schaller (schaller@strw.leidenuniv.nl)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
******************************************************************************/
/* Config parameters. */
#include <config.h>

#if defined(HAVE_HDF5) && defined(WITH_MPI)
/* Some standard headers. */
#include <hdf5.h>
#include <math.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>

#include <time.h>
/* This object's header. */
#include "distributed_io.h"
/* Local includes. */
#include "black_holes_io.h"
#include "chemistry_io.h"
#include "common_io.h"
#include "dimension.h"
#include "engine.h"
#include "error.h"
#include "gravity_io.h"
#include "gravity_properties.h"
#include "hydro_io.h"
#include "hydro_properties.h"
#include "io_compression.h"

#include "io_properties.h"
#include "memuse.h"
#include "output_list.h"
#include "output_options.h"

#include "part.h"
#include "part_type.h"

#include "star_formation_io.h"
#include "stars_io.h"
#include "tools.h"

#include "units.h"

#include "xmf.h"
/* Are we timing the i/o? */
// #define IO_SPEED_MEASUREMENT
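/* If enabled, define IO_SPEED_MEASUREMENT to the rank that should report
* timings, or to -1 to have every rank report (see the comparisons against
* engine_rank below). */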
/* Max number of entries that can be written for a given particle type */
static const int io_max_size_output_list = 100;

/**
* @brief Writes a data array in given HDF5 group.
*
* @param e The #engine we are writing from.
* @param grp The group in which to write.
* @param fileName The name of the file in which the data is written.
* @param partTypeGroupName The name of the group containing the particles in
* the HDF5 file.
* @param props The #io_props of the field to write.
* @param N The number of particles to write.
* @param lossy_compression Level of lossy compression to use for this field.
* @param internal_units The #unit_system used internally.
* @param snapshot_units The #unit_system used in the snapshots.
*
* @todo A better version using HDF5 hyper-slabs to write the file directly
* from the part array will be written once the structures have been
* stabilized.
*/
void write_distributed_array(
const struct engine* e, hid_t grp, const char* fileName,
const char* partTypeGroupName, const struct io_props props, const size_t N,
const enum lossy_compression_schemes lossy_compression,
const struct unit_system* internal_units,
const struct unit_system* snapshot_units) {

#ifdef IO_SPEED_MEASUREMENT
const ticks tic_total = getticks();
#endif

const size_t typeSize = io_sizeof_type(props.type);
const size_t num_elements = N * props.dimension;
/* message("Writing '%s' array...", props.name); */
/* Allocate temporary buffer */
void* temp = NULL;
if (swift_memalign("writebuff", (void**)&temp, IO_BUFFER_ALIGNMENT,
num_elements * typeSize) != 0)
error("Unable to allocate temporary i/o buffer");
#ifdef IO_SPEED_MEASUREMENT
ticks tic = getticks();
#endif

/* Copy the particle data to the temporary buffer */
io_copy_temp_buffer(temp, e, props, N, internal_units, snapshot_units);
#ifdef IO_SPEED_MEASUREMENT
if (engine_rank == IO_SPEED_MEASUREMENT || IO_SPEED_MEASUREMENT == -1)
message("Copying for '%s' took %.3f %s.", props.name,
clocks_from_ticks(getticks() - tic), clocks_getunit());
#endif

/* Create data space */
hid_t h_space;
if (N > 0)
h_space = H5Screate(H5S_SIMPLE);
else
h_space = H5Screate(H5S_NULL);

if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name);
/* Decide what chunk size to use based on compression */

int rank;
hsize_t shape[2];
hsize_t chunk_shape[2];
if (props.dimension > 1) {
rank = 2;
shape[0] = N;
shape[1] = props.dimension;
chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = props.dimension;
} else {
rank = 1;
shape[0] = N;
shape[1] = 0;
chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = 0;
}
/* Make sure the chunks are not larger than the dataset */
if (chunk_shape[0] > N) chunk_shape[0] = N;
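/* Note: HDF5 requires a chunked layout for the filters applied below
* (lossy filters, shuffle, deflate, Fletcher-32 checksum), and the chunk
* extent of a fixed-size dataset may not exceed the dataset extent, hence
* the cap at N. */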
/* Change shape of data space */
hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, shape);
if (h_err < 0)
error("Error while changing data space shape for field '%s'.", props.name);
/* Dataset type */
hid_t h_type = H5Tcopy(io_hdf5_type(props.type));

/* Dataset properties */
hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);

/* Create filters and set compression level if we have something to write */
char comp_buffer[32] = "None";
if (N > 0) {

/* Set chunk size */
h_err = H5Pset_chunk(h_prop, rank, chunk_shape);
if (h_err < 0)
error("Error while setting chunk size (%llu, %llu) for field '%s'.",
(unsigned long long)chunk_shape[0],
(unsigned long long)chunk_shape[1], props.name);

/* Are we imposing some form of lossy compression filter? */
if (lossy_compression != compression_write_lossless)
set_hdf5_lossy_compression(&h_prop, &h_type, lossy_compression,
props.name, comp_buffer);

if (e->snapshot_compression > 0) {
h_err = H5Pset_shuffle(h_prop);
if (h_err < 0)
error("Error while setting shuffling options for field '%s'.",
props.name);

h_err = H5Pset_deflate(h_prop, e->snapshot_compression);
if (h_err < 0)
error("Error while setting compression options for field '%s'.",
props.name);
}

/* Impose check-sum to verify data corruption */
h_err = H5Pset_fletcher32(h_prop);
if (h_err < 0)
error("Error while setting checksum options for field '%s'.", props.name);
}
/* Create dataset */
const hid_t h_data = H5Dcreate(grp, props.name, h_type, h_space, H5P_DEFAULT,
h_prop, H5P_DEFAULT);

if (h_data < 0) error("Error while creating dataset '%s'.", props.name);
#ifdef IO_SPEED_MEASUREMENT
tic = getticks();
#endif

/* Write temporary buffer to HDF5 dataspace */
h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_space, H5S_ALL,
H5P_DEFAULT, temp);
if (h_err < 0) error("Error while writing data array '%s'.", props.name);
#ifdef IO_SPEED_MEASUREMENT
ticks toc = getticks();
float ms = clocks_from_ticks(toc - tic);
int megaBytes = N * props.dimension * typeSize / (1024 * 1024);
if (engine_rank == IO_SPEED_MEASUREMENT || IO_SPEED_MEASUREMENT == -1)
message(
"H5Dwrite for '%s' (%d MB) on rank %d took %.3f %s (speed = %f MB/s).",
props.name, megaBytes, engine_rank, ms, clocks_getunit(),
megaBytes / (ms / 1000.));
#endif

/* Write unit conversion factors for this data set */
char buffer[FIELD_BUFFER_SIZE] = {0};
units_cgs_conversion_string(buffer, snapshot_units, props.units,
props.scale_factor_exponent);
float baseUnitsExp[5];
units_get_base_unit_exponents_array(baseUnitsExp, props.units);
io_write_attribute_f(h_data, "U_M exponent", baseUnitsExp[UNIT_MASS]);
io_write_attribute_f(h_data, "U_L exponent", baseUnitsExp[UNIT_LENGTH]);
io_write_attribute_f(h_data, "U_t exponent", baseUnitsExp[UNIT_TIME]);
io_write_attribute_f(h_data, "U_I exponent", baseUnitsExp[UNIT_CURRENT]);
io_write_attribute_f(h_data, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]);
io_write_attribute_f(h_data, "h-scale exponent", 0.f);
io_write_attribute_f(h_data, "a-scale exponent", props.scale_factor_exponent);
io_write_attribute_s(h_data, "Expression for physical CGS units", buffer);

io_write_attribute_s(h_data, "Lossy compression filter", comp_buffer);

io_write_attribute_b(h_data, "Value stored as physical", props.is_physical);
io_write_attribute_b(h_data, "Property can be converted to comoving",
props.is_convertible_to_comoving);

/* Write the actual number this conversion factor corresponds to */
const double factor =
units_cgs_conversion_factor(snapshot_units, props.units);
io_write_attribute_d(
h_data,
"Conversion factor to CGS (not including cosmological corrections)",
factor);
io_write_attribute_d(
h_data,
"Conversion factor to physical CGS (including cosmological corrections)",
factor * pow(e->cosmology->a, props.scale_factor_exponent));
#ifdef SWIFT_DEBUG_CHECKS
if (strlen(props.description) == 0)
error("Invalid (empty) description of the field '%s'", props.name);
#endif
/* Write the full description */
io_write_attribute_s(h_data, "Description", props.description);
/* Free and close everything */
swift_free("writebuff", temp);

H5Tclose(h_type); /* release the type copied above with H5Tcopy() */
H5Pclose(h_prop);
H5Dclose(h_data);
H5Sclose(h_space);
#ifdef IO_SPEED_MEASUREMENT
if (engine_rank == IO_SPEED_MEASUREMENT || IO_SPEED_MEASUREMENT == -1)
message("'%s' took %.3f %s.", props.name,
clocks_from_ticks(getticks() - tic), clocks_getunit());
#endif

}
/**
* @brief Writes a virtual array linking to the data in the distributed files.
*
* @param e The #engine we are writing from.
* @param grp The HDF5 group to write to.
* @param fileName_base The base name of the distributed snapshot files.
* @param xmfFile The (opened) XMF file we are appending to.
* @param partTypeGroupName The name of the group we are writing to.
* @param props The #io_props of the field to write.
* @param N_total The total number of particles to write in this array.
* @param N_counts The number of particles written by each rank, for each type.
* @param num_ranks The number of ranks (i.e. distributed files).
* @param ptype The particle type being written.
* @param lossy_compression Level of lossy compression used for this field.
* @param snapshot_units The units used for the data in this snapshot.
*/
void write_array_virtual(struct engine* e, hid_t grp, const char* fileName_base,
FILE* xmfFile, char* partTypeGroupName,
struct io_props props, long long N_total,
const long long* N_counts, const int num_ranks,
const int ptype,
const enum lossy_compression_schemes lossy_compression,
const struct unit_system* snapshot_units) {
#if H5_VERSION_GE(1, 10, 0)
/* Create data space */
const hid_t h_space = H5Screate(H5S_SIMPLE);
if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name);
int rank = 0;
hsize_t shape[2];
hsize_t source_shape[2];
hsize_t start[2] = {0, 0};
hsize_t count[2];
if (props.dimension > 1) {
rank = 2;
shape[0] = N_total;
shape[1] = props.dimension;
source_shape[0] = 0;
source_shape[1] = props.dimension;
count[0] = 0;
count[1] = props.dimension;
} else {
rank = 1;
shape[0] = N_total;
shape[1] = 0;
source_shape[0] = 0;
source_shape[1] = 0;
count[0] = 0;
count[1] = 0;
}
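/* Note: count[0] and source_shape[0] are placeholders at this point; they
* are overwritten with the per-rank particle counts in the mapping loop
* below. */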
/* Change shape of data space */
hid_t h_err = H5Sset_extent_simple(h_space, rank, shape, NULL);
if (h_err < 0)
error("Error while changing data space shape for field '%s'.", props.name);
/* Dataset type */
hid_t h_type = H5Tcopy(io_hdf5_type(props.type));
/* Dataset properties */
hid_t h_prop = H5Pcreate(H5P_DATASET_CREATE);
/* Are we imposing some form of lossy compression filter? */
char comp_buffer[32] = "None";
if (lossy_compression != compression_write_lossless)
sprintf(comp_buffer, "%s",
lossy_compression_schemes_names[lossy_compression]);
/* The name of the dataset to map to in the other files */
char source_dataset_name[256];
sprintf(source_dataset_name, "PartType%d/%s", ptype, props.name);
/* Construct a relative base name */
char fileName_relative_base[256];
int pos_last_slash = strlen(fileName_base) - 1;
for (/* */; pos_last_slash >= 0; --pos_last_slash)
if (fileName_base[pos_last_slash] == '/') break;
sprintf(fileName_relative_base, "%s", &fileName_base[pos_last_slash + 1]);
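/* Illustrative example (hypothetical names): a base of "snapdir/snap_0003"
* reduces to "snap_0003", so the virtual file references its source files
* relative to its own directory. */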
/* Create all the virtual mappings */
for (int i = 0; i < num_ranks; ++i) {
/* Get the number of particles of this type written on this rank */
count[0] = N_counts[i * swift_type_count + ptype];
/* Select the space in the virtual file */
h_err = H5Sselect_hyperslab(h_space, H5S_SELECT_SET, start, /*stride=*/NULL,
count, /*block=*/NULL);
if (h_err < 0) error("Error selecting hyper-slab in the virtual file");
/* Select the space in the (already existing) source file */
source_shape[0] = count[0];
hid_t h_source_space = H5Screate_simple(rank, source_shape, NULL);
if (h_source_space < 0) error("Error creating space in the source file");
char fileName[1024];
sprintf(fileName, "%s.%d.hdf5", fileName_relative_base, i);
/* Make the virtual link */
h_err = H5Pset_virtual(h_prop, h_space, fileName, source_dataset_name,
h_source_space);
if (h_err < 0) error("Error setting the virtual properties");
H5Sclose(h_source_space);
/* Move to the next slab (i.e. next file) */
start[0] += count[0];
}
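/* Worked example (assumed numbers): if 3 ranks wrote 10, 20 and 30
* particles of this type, the virtual dataset has extent 60 along axis 0
* and maps rows [0,10) to file 0, [10,30) to file 1 and [30,60) to file 2,
* each via one hyper-slab selection. */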
/* Create virtual dataset */
const hid_t h_data = H5Dcreate(grp, props.name, h_type, h_space, H5P_DEFAULT,
h_prop, H5P_DEFAULT);
if (h_data < 0) error("Error while creating dataset '%s'.", props.name);
/* Write unit conversion factors for this data set */
char buffer[FIELD_BUFFER_SIZE] = {0};
units_cgs_conversion_string(buffer, snapshot_units, props.units,
props.scale_factor_exponent);
float baseUnitsExp[5];
units_get_base_unit_exponents_array(baseUnitsExp, props.units);
io_write_attribute_f(h_data, "U_M exponent", baseUnitsExp[UNIT_MASS]);
io_write_attribute_f(h_data, "U_L exponent", baseUnitsExp[UNIT_LENGTH]);
io_write_attribute_f(h_data, "U_t exponent", baseUnitsExp[UNIT_TIME]);
io_write_attribute_f(h_data, "U_I exponent", baseUnitsExp[UNIT_CURRENT]);
io_write_attribute_f(h_data, "U_T exponent", baseUnitsExp[UNIT_TEMPERATURE]);
io_write_attribute_f(h_data, "h-scale exponent", 0.f);
io_write_attribute_f(h_data, "a-scale exponent", props.scale_factor_exponent);
io_write_attribute_s(h_data, "Expression for physical CGS units", buffer);
io_write_attribute_s(h_data, "Lossy compression filter", comp_buffer);

io_write_attribute_b(h_data, "Value stored as physical", props.is_physical);
io_write_attribute_b(h_data, "Property can be converted to comoving",
props.is_convertible_to_comoving);
/* Write the actual number this conversion factor corresponds to */
const double factor =
units_cgs_conversion_factor(snapshot_units, props.units);
io_write_attribute_d(
h_data,
"Conversion factor to CGS (not including cosmological corrections)",
factor);
io_write_attribute_d(
h_data,
"Conversion factor to physical CGS (including cosmological corrections)",
factor * pow(e->cosmology->a, props.scale_factor_exponent));
#ifdef SWIFT_DEBUG_CHECKS
if (strlen(props.description) == 0)
error("Invalid (empty) description of the field '%s'", props.name);
#endif
/* Write the full description */
io_write_attribute_s(h_data, "Description", props.description);
/* Add a line to the XMF */
if (xmfFile != NULL) {
char fileName[1024];
sprintf(fileName, "%s.hdf5", fileName_base);
xmf_write_line(xmfFile, fileName, /*distributed=*/1, partTypeGroupName,
props.name, N_total, props.dimension, props.type);
}
/* Close everything */
H5Tclose(h_type);
H5Pclose(h_prop);
H5Dclose(h_data);
H5Sclose(h_space);
#else
error(
"Function cannot be called when the code is compiled with hdf5 older "
"than 1.10.0");
#endif
}
/**
* @brief Writes the virtual meta-file linking the distributed snapshot files.
*
* @param e The #engine.
* @param fileName_base The base name of the snapshot files.
* @param xmfFileName The name of the companion XMF file.
* @param N_total The total number of particles of each type to write.
* @param N_counts The number of particles written by each rank, for each type.
* @param num_ranks The number of ranks (i.e. distributed files).
* @param to_write Whether each particle type has anything to write.
* @param numFields The number of fields to write for each particle type.
* @param current_selection_name The name of the current output selection.
* @param internal_units The #unit_system used internally.
* @param snapshot_units The #unit_system used in the snapshots.
* @param fof Is this a snapshot related to a stand-alone FOF call?
* @param subsample_any Are any fields being subsampled?
* @param subsample_fraction The subsampling fraction of each particle type.
*/
void write_virtual_file(struct engine* e, const char* fileName_base,
const char* xmfFileName,
const long long N_total[swift_type_count],
const long long* N_counts, const int num_ranks,
const int to_write[swift_type_count],
const int numFields[swift_type_count],
char current_selection_name[FIELD_BUFFER_SIZE],
const struct unit_system* internal_units,
const struct unit_system* snapshot_units, const int fof,
const int subsample_any,
const float subsample_fraction[swift_type_count]) {
#if H5_VERSION_GE(1, 10, 0)
struct output_options* output_options = e->output_options;
const int with_cosmology = e->policy & engine_policy_cosmology;
const int with_cooling = e->policy & engine_policy_cooling;
const int with_temperature = e->policy & engine_policy_temperature;
const int with_fof = e->policy & engine_policy_fof;
#ifdef HAVE_VELOCIRAPTOR
const int with_stf = (e->policy & engine_policy_structure_finding) &&
(e->s->gpart_group_data != NULL);
#else
const int with_stf = 0;
#endif
const int with_rt = e->policy & engine_policy_rt;
FILE* xmfFile = 0;
int numFiles = 1;
/* First time, we need to create the XMF file */
if (e->snapshot_output_count == 0) xmf_create_file(xmfFileName);
/* Prepare the XMF file for the new entry */
xmfFile = xmf_prepare_file(xmfFileName);
char fileName[1024];
sprintf(fileName, "%s.hdf5", fileName_base);
/* Write the part of the XMF file corresponding to this
* specific output */
xmf_write_outputheader(xmfFile, fileName, e->time);

/* Set the minimal API version to avoid issues with advanced features */
hid_t h_props = H5Pcreate(H5P_FILE_ACCESS);
herr_t err = H5Pset_libver_bounds(h_props, HDF5_LOWEST_FILE_FORMAT_VERSION,
HDF5_HIGHEST_FILE_FORMAT_VERSION);
if (err < 0) error("Error setting the hdf5 API version");
/* Open HDF5 file with the chosen parameters */

hid_t h_file = H5Fcreate(fileName, H5F_ACC_TRUNC, H5P_DEFAULT, h_props);
if (h_file < 0) error("Error while opening file '%s'.", fileName);
/* Open header to write simulation properties */
/* message("Writing file header..."); */
hid_t h_grp =
H5Gcreate(h_file, "/Header", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
if (h_grp < 0) error("Error while creating file header\n");
/* Convert basic output information to snapshot units */
const double factor_time =
units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_TIME);
const double factor_length =
units_conversion_factor(internal_units, snapshot_units, UNIT_CONV_LENGTH);
const double dblTime = e->time * factor_time;
const double dim[3] = {e->s->dim[0] * factor_length,
e->s->dim[1] * factor_length,
e->s->dim[2] * factor_length};
/* Print the relevant information and print status */
io_write_attribute(h_grp, "BoxSize", DOUBLE, dim, 3);
io_write_attribute(h_grp, "Time", DOUBLE, &dblTime, 1);
const int dimension = (int)hydro_dimension;
io_write_attribute(h_grp, "Dimension", INT, &dimension, 1);
io_write_attribute(h_grp, "Redshift", DOUBLE, &e->cosmology->z, 1);
io_write_attribute(h_grp, "Scale-factor", DOUBLE, &e->cosmology->a, 1);
io_write_attribute_s(h_grp, "Code", "SWIFT");
io_write_attribute_s(h_grp, "RunName", e->run_name);
io_write_attribute_s(h_grp, "System", hostname());
io_write_attribute(h_grp, "Shift", DOUBLE, e->s->initial_shift, 3);
/* Write out the particle types */
io_write_part_type_names(h_grp);
/* Write out the time-base */
if (with_cosmology) {
io_write_attribute_d(h_grp, "TimeBase_dloga", e->time_base);
const double delta_t = cosmology_get_timebase(e->cosmology, e->ti_current);
io_write_attribute_d(h_grp, "TimeBase_dt", delta_t);
} else {
io_write_attribute_d(h_grp, "TimeBase_dloga", 0);
io_write_attribute_d(h_grp, "TimeBase_dt", e->time_base);
}
/* Store the time at which the snapshot was written */
time_t tm = time(NULL);
struct tm* timeinfo = localtime(&tm);
char snapshot_date[64];
strftime(snapshot_date, 64, "%T %F %Z", timeinfo);
io_write_attribute_s(h_grp, "SnapshotDate", snapshot_date);
/* GADGET-2 legacy values */
/* Number of particles of each type */
long long numParticlesThisFile[swift_type_count] = {0};
unsigned int numParticles[swift_type_count] = {0};
unsigned int numParticlesHighWord[swift_type_count] = {0};
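/* GADGET-2 convention: a 64-bit total N is split into two 32-bit words,
* N = NumPart_Total + 2^32 * NumPart_Total_HighWord. */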
for (int ptype = 0; ptype < swift_type_count; ++ptype) {
numParticles[ptype] = (unsigned int)N_total[ptype];
numParticlesHighWord[ptype] = (unsigned int)(N_total[ptype] >> 32);
if (numFields[ptype] == 0) {
numParticlesThisFile[ptype] = 0;
} else {
numParticlesThisFile[ptype] = N_total[ptype];
}
}
io_write_attribute(h_grp, "NumPart_ThisFile", LONGLONG, numParticlesThisFile,
swift_type_count);
io_write_attribute(h_grp, "NumPart_Total", UINT, numParticles,
swift_type_count);
io_write_attribute(h_grp, "NumPart_Total_HighWord", UINT,
numParticlesHighWord, swift_type_count);
io_write_attribute(h_grp, "TotalNumberOfParticles", LONGLONG, N_total,
swift_type_count);
double MassTable[swift_type_count] = {0};
io_write_attribute(h_grp, "MassTable", DOUBLE, MassTable, swift_type_count);
io_write_attribute(h_grp, "InitialMassTable", DOUBLE,
e->s->initial_mean_mass_particles, swift_type_count);
unsigned int flagEntropy[swift_type_count] = {0};
flagEntropy[0] = writeEntropyFlag();
io_write_attribute(h_grp, "Flag_Entropy_ICs", UINT, flagEntropy,
swift_type_count);
io_write_attribute(h_grp, "NumFilesPerSnapshot", INT, &numFiles, 1);
io_write_attribute_i(h_grp, "ThisFile", 0);
io_write_attribute_s(h_grp, "SelectOutput", current_selection_name);
io_write_attribute_i(h_grp, "Virtual", 1);
io_write_attribute(h_grp, "CanHaveTypes", INT, to_write, swift_type_count);
if (subsample_any) {
io_write_attribute_s(h_grp, "OutputType", "SubSampled");
io_write_attribute(h_grp, "SubSampleFractions", FLOAT, subsample_fraction,
swift_type_count);
} else {
io_write_attribute_s(h_grp, "OutputType", "FullVolume");
}
/* Close header */
H5Gclose(h_grp);
/* Copy metadata from ICs to the file */
ic_info_write_hdf5(e->ics_metadata, h_file);
/* Write all the meta-data */
io_write_meta_data(h_file, e, internal_units, snapshot_units, fof);
/* Loop over all particle types */
for (int ptype = 0; ptype < swift_type_count; ptype++) {
/* Don't do anything if there are
* (a) no particles of this kind in this run, or
* (b) if we have disabled every field of this particle type. */
if (!to_write[ptype] || numFields[ptype] == 0) continue;
/* Add the global information for that particle type to
* the XMF meta-file */
xmf_write_groupheader(xmfFile, fileName, /*distributed=*/1, N_total[ptype],
(enum part_type)ptype);
/* Create the particle group in the file */
char partTypeGroupName[PARTICLE_GROUP_BUFFER_SIZE];
snprintf(partTypeGroupName, PARTICLE_GROUP_BUFFER_SIZE, "/PartType%d",
ptype);
h_grp = H5Gcreate(h_file, partTypeGroupName, H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT);
if (h_grp < 0)
error("Error while creating particle group %s.", partTypeGroupName);
/* Add an alias name for convenience */
char aliasName[PARTICLE_GROUP_BUFFER_SIZE];
snprintf(aliasName, PARTICLE_GROUP_BUFFER_SIZE, "/%sParticles",
part_type_names[ptype]);
hid_t h_err = H5Lcreate_soft(partTypeGroupName, h_grp, aliasName,
H5P_DEFAULT, H5P_DEFAULT);
if (h_err < 0) error("Error while creating alias for particle group.\n");
/* Write the number of particles as an attribute */
io_write_attribute_ll(h_grp, "NumberOfParticles", N_total[ptype]);
io_write_attribute_ll(h_grp, "TotalNumberOfParticles", N_total[ptype]);
int num_fields = 0;
struct io_props list[io_max_size_output_list];
bzero(list, io_max_size_output_list * sizeof(struct io_props));
/* Write particle fields from the particle structure */
switch (ptype) {
case swift_type_gas:
io_select_hydro_fields(NULL, NULL, with_cosmology, with_cooling,
with_temperature, with_fof, with_stf, with_rt, e,
&num_fields, list);
break;
case swift_type_dark_matter:
case swift_type_dark_matter_background:
io_select_dm_fields(NULL, NULL, with_fof, with_stf, e, &num_fields,
list);
break;
case swift_type_neutrino:
io_select_neutrino_fields(NULL, NULL, with_fof, with_stf, e,
&num_fields, list);
break;
case swift_type_sink:
io_select_sink_fields(NULL, with_cosmology, with_fof, with_stf, e,
&num_fields, list);
break;
case swift_type_stars:
io_select_star_fields(NULL, with_cosmology, with_fof, with_stf, with_rt,
e, &num_fields, list);
break;
case swift_type_black_hole:
io_select_bh_fields(NULL, with_cosmology, with_fof, with_stf, e,
&num_fields, list);
break;
default:
error("Particle Type %d not yet supported. Aborting", ptype);
}
/* Verify we are not going to crash when writing below */
if (num_fields >= io_max_size_output_list)
error("Too many fields to write for particle type %d", ptype);
for (int i = 0; i < num_fields; ++i) {
if (!list[i].is_used) error("List of fields contains an empty entry!");
if (!list[i].dimension)
error("Dimension of field '%s' is zero!", list[i].name);
}
/* Did the user specify a non-standard default for the entire particle
* type? */
const enum lossy_compression_schemes compression_level_current_default =
output_options_get_ptype_default_compression(
output_options->select_output, current_selection_name,
(enum part_type)ptype, e->verbose);
/* Prepare everything that is not cancelled */
int num_fields_written = 0;
for (int i = 0; i < num_fields; ++i) {
/* Did the user cancel this field? */
const enum lossy_compression_schemes compression_level =
output_options_get_field_compression(
output_options, current_selection_name, list[i].name,
(enum part_type)ptype, compression_level_current_default,
e->verbose);
if (compression_level != compression_do_not_write) {
write_array_virtual(e, h_grp, fileName_base, xmfFile, partTypeGroupName,
list[i], N_total[ptype], N_counts, num_ranks, ptype,
compression_level, snapshot_units);
num_fields_written++;
}
}
/* Only write this now that we know exactly how many fields there are. */
io_write_attribute_i(h_grp, "NumberOfFields", num_fields_written);
/* Close particle group */
H5Gclose(h_grp);
/* Close this particle group in the XMF file as well */
xmf_write_groupfooter(xmfFile, (enum part_type)ptype);
}
/* Write XMF file descriptor */
xmf_write_outputfooter(xmfFile, e->snapshot_output_count, e->time);

/* Close the file */
H5Fclose(h_file);

H5Pclose(h_props);
#else
error(
"Function cannot be called when the code is compiled with hdf5 older "
"than 1.10.0");
#endif
}

/**
* @brief Writes a snapshot distributed into multiple files.
*
* @param e The engine containing all the system.
* @param internal_units The #unit_system used internally
* @param snapshot_units The #unit_system used in the snapshots
* @param fof Is this a snapshot related to a stand-alone FOF call?
* @param mpi_rank The rank number of the calling MPI rank.
* @param mpi_size the number of MPI ranks.
* @param comm The communicator used by the MPI ranks.
* @param info The MPI information object.
*
* Creates a series of HDF5 output files (1 per MPI node) as a snapshot.
* Writes the particles contained in the engine.
* If such files already exist, they are erased and replaced by the new ones.
* The companion XMF file is also updated accordingly.
*/
void write_output_distributed(struct engine* e,
const struct unit_system* internal_units,
const struct unit_system* snapshot_units,
const int fof, const int mpi_rank,
const int mpi_size, MPI_Comm comm,
MPI_Info info) {

hid_t h_file = 0, h_grp = 0;
int numFiles = mpi_size;
const struct part* parts = e->s->parts;
const struct xpart* xparts = e->s->xparts;
const struct gpart* gparts = e->s->gparts;

const struct spart* sparts = e->s->sparts;
const struct bpart* bparts = e->s->bparts;
struct output_options* output_options = e->output_options;
struct output_list* output_list = e->output_list_snapshots;

const int with_cosmology = e->policy & engine_policy_cosmology;
const int with_cooling = e->policy & engine_policy_cooling;
const int with_temperature = e->policy & engine_policy_temperature;
const int with_fof = e->policy & engine_policy_fof;
const int with_DM_background = e->s->with_DM_background;
const int with_DM = e->s->with_DM;
const int with_neutrinos = e->s->with_neutrinos;
const int with_rt = e->policy & engine_policy_rt;
const int with_hydro = (e->policy & engine_policy_hydro) ? 1 : 0;
const int with_stars = (e->policy & engine_policy_stars) ? 1 : 0;
const int with_black_hole = (e->policy & engine_policy_black_holes) ? 1 : 0;
const int with_sink = (e->policy & engine_policy_sinks) ? 1 : 0;

#ifdef HAVE_VELOCIRAPTOR
const int with_stf = (e->policy & engine_policy_structure_finding) &&
(e->s->gpart_group_data != NULL);
#else
const int with_stf = 0;
#endif
/* Number of particles currently in the arrays */
const size_t Ntot = e->s->nr_gparts;
const size_t Ngas = e->s->nr_parts;

const size_t Nstars = e->s->nr_sparts;
const size_t Nblackholes = e->s->nr_bparts;
/* Determine if we are writing a reduced snapshot, and if so which
* output selection type to use */
char current_selection_name[FIELD_BUFFER_SIZE] =
select_output_header_default_name;
if (output_list) {
/* Users could have specified a different Select Output scheme for each
* snapshot. */
output_list_get_current_select_output(output_list, current_selection_name);
}
int snap_count = -1;
int number_digits = -1;
if (output_list && output_list->alternative_labels_on) {
snap_count = output_list->snapshot_labels[e->snapshot_output_count];
number_digits = 0;
} else if (e->snapshot_invoke_stf) {
snap_count = e->stf_output_count;
number_digits = 4;
} else {
snap_count = e->snapshot_output_count;
number_digits = 4;
}

/* Directory and file name */
char dirName[1024];
char fileName[1024];
char fileName_base[1024];
char xmfFileName[FILENAME_BUFFER_SIZE];
char snapshot_subdir_name[FILENAME_BUFFER_SIZE];
char snapshot_base_name[FILENAME_BUFFER_SIZE];
output_options_get_basename(output_options, current_selection_name,
e->snapshot_subdir, e->snapshot_base_name,
snapshot_subdir_name, snapshot_base_name);
io_get_snapshot_filename(
fileName, xmfFileName, output_list, e->snapshot_invoke_stf,
e->stf_output_count, e->snapshot_output_count, e->snapshot_subdir,
snapshot_subdir_name, e->snapshot_base_name, snapshot_base_name);
/* Are we using a sub-dir? */
if (strnlen(e->snapshot_subdir, PARSER_MAX_LINE_SIZE) > 0) {
sprintf(dirName, "%s/%s_%0*d", snapshot_subdir_name, snapshot_base_name,
number_digits, snap_count);
sprintf(fileName, "%s/%s_%0*d/%s_%0*d.%d.hdf5", snapshot_subdir_name,
snapshot_base_name, number_digits, snap_count, snapshot_base_name,
number_digits, snap_count, mpi_rank);
sprintf(fileName_base, "%s/%s_%0*d/%s_%0*d", snapshot_subdir_name,
snapshot_base_name, number_digits, snap_count, snapshot_base_name,
number_digits, snap_count);
} else {
sprintf(dirName, "%s_%0*d", snapshot_base_name, number_digits, snap_count);
sprintf(fileName, "%s_%0*d/%s_%0*d.%d.hdf5", snapshot_base_name,
number_digits, snap_count, snapshot_base_name, number_digits,
snap_count, mpi_rank);
sprintf(fileName_base, "%s_%0*d/%s_%0*d", snapshot_base_name, number_digits,
snap_count, snapshot_base_name, number_digits, snap_count);
}
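/* Illustrative example (hypothetical names): with base name "snap", 4
* digits and snapshot number 3, rank 17 writes "snap_0003/snap_0003.17.hdf5"
* and the virtual meta-file written later is "snap_0003/snap_0003.hdf5". */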
/* Create the directory */
if (mpi_rank == 0) safe_checkdir(snapshot_subdir_name, /*create=*/1);
if (mpi_rank == 0) safe_checkdir(dirName, /*create=*/1);
MPI_Barrier(comm);

/* Do we want to sub-sample any of the arrays */
int subsample[swift_type_count];
float subsample_fraction[swift_type_count];
for (int i = 0; i < swift_type_count; ++i) {
subsample[i] = 0;
subsample_fraction[i] = 1.f;
}
output_options_get_subsampling(
output_options, current_selection_name, e->snapshot_subsample,
e->snapshot_subsample_fraction, subsample, subsample_fraction);
/* Is any particle type being subsampled? */
int subsample_any = 0;
for (int i = 0; i < swift_type_count; ++i) {
subsample_any += subsample[i];
if (!subsample[i]) subsample_fraction[i] = 1.f;
}
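/* At this point subsample_any is non-zero if at least one particle type is
* subsampled, and the fraction of any non-subsampled type has been reset to
* 1 so that the fractions recorded in the snapshot headers are meaningful. */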
/* Number of particles that we will write */
size_t Ngas_written, Ndm_written, Ndm_background, Ndm_neutrino,
Nsinks_written, Nstars_written, Nblackholes_written;
if (subsample[swift_type_gas]) {
Ngas_written = io_count_gas_to_write(e->s, /*subsample=*/1,
subsample_fraction[swift_type_gas],
e->snapshot_output_count);
} else {
Ngas_written =
e->s->nr_parts - e->s->nr_inhibited_parts - e->s->nr_extra_parts;
}
if (subsample[swift_type_stars]) {
Nstars_written = io_count_stars_to_write(
e->s, /*subsample=*/1, subsample_fraction[swift_type_stars],
e->snapshot_output_count);
} else {
Nstars_written =
e->s->nr_sparts - e->s->nr_inhibited_sparts - e->s->nr_extra_sparts;
}
if (subsample[swift_type_black_hole]) {
Nblackholes_written = io_count_black_holes_to_write(
e->s, /*subsample=*/1, subsample_fraction[swift_type_black_hole],
e->snapshot_output_count);
} else {
Nblackholes_written =
e->s->nr_bparts - e->s->nr_inhibited_bparts - e->s->nr_extra_bparts;
}
if (subsample[swift_type_sink]) {
Nsinks_written = io_count_sinks_to_write(
e->s, /*subsample=*/1, subsample_fraction[swift_type_sink],
e->snapshot_output_count);
} else {
Nsinks_written =
e->s->nr_sinks - e->s->nr_inhibited_sinks - e->s->nr_extra_sinks;
}
Ndm_written = io_count_dark_matter_to_write(
e->s, subsample[swift_type_dark_matter],
subsample_fraction[swift_type_dark_matter], e->snapshot_output_count);
if (with_DM_background) {
/* Assumed counterpart of the io_count_*_to_write helpers used above */
Ndm_background = io_count_background_dark_matter_to_write(
e->s, subsample[swift_type_dark_matter_background],
subsample_fraction[swift_type_dark_matter_background],
e->snapshot_output_count);
} else {
Ndm_background = 0;
}
if (with_neutrinos) {
Ndm_neutrino = io_count_neutrinos_to_write(
e->s, subsample[swift_type_neutrino],
subsample_fraction[swift_type_neutrino], e->snapshot_output_count);
} else {
Ndm_neutrino = 0;
}

/* Compute offset in the file and total number of particles */
long long N[swift_type_count] = {
Ngas_written, Ndm_written, Ndm_background, Nsinks_written,
Nstars_written, Nblackholes_written, Ndm_neutrino};

/* Gather the total number of particles to write */
long long N_total[swift_type_count] = {0};
MPI_Allreduce(N, N_total, swift_type_count, MPI_LONG_LONG_INT, MPI_SUM, comm);
/* Collect the number of particles written by each rank */
long long* N_counts =
(long long*)malloc(mpi_size * swift_type_count * sizeof(long long));
MPI_Gather(N, swift_type_count, MPI_LONG_LONG_INT, N_counts, swift_type_count,
MPI_LONG_LONG_INT, 0, comm);
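/* Note: with a root of 0, only rank 0 receives the gathered N_counts; this
* is the array consumed by write_virtual_file (see above) when building the
* virtual meta-file. */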
/* List what fields to write.
* Note that we want to write a 0-size dataset for some species
* in case future snapshots will contain them (e.g. star formation) */
const int to_write[swift_type_count] = {
with_hydro, with_DM, with_DM_background, with_sink,
with_stars, with_black_hole, with_neutrinos
};
/* Use a single Lustre stripe with a rank-based OST offset? */
if (e->snapshot_lustre_OST_checks != 0) {
/* Gather information about the current state of the OSTs. */
struct swift_ost_store ost_infos;