diff --git a/src/distributed_io.c b/src/distributed_io.c
index 95cfa297f472ef4a450061c7bd24dd23d62374e9..8363ab5cf1077be61daf48c9ebee2cc8a5ef0e3c 100644
--- a/src/distributed_io.c
+++ b/src/distributed_io.c
@@ -64,6 +64,9 @@
 /* Are we timing the i/o? */
 //#define IO_SPEED_MEASUREMENT
 
+/* Max number of entries that can be written for a given particle type */
+static const int io_max_size_output_list = 100;
+
 /**
  * @brief Writes a data array in given HDF5 group.
  *
@@ -642,8 +645,8 @@ void write_virtual_file(struct engine* e, const char* fileName_base,
     io_write_attribute_ll(h_grp, "TotalNumberOfParticles", N_total[ptype]);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
 
     /* Write particle fields from the particle structure */
     switch (ptype) {
@@ -684,6 +687,15 @@
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Verify we are not going to crash when writing below */
+    if (num_fields >= io_max_size_output_list)
+      error("Too many fields to write for particle type %d", ptype);
+    for (int i = 0; i < num_fields; ++i) {
+      if (!list[i].is_used) error("List of fields contains an empty entry!");
+      if (!list[i].dimension)
+        error("Dimension of field '%s' is 0!", list[i].name);
+    }
+
     /* Did the user specify a non-standard default for the entire particle
      * type? */
     const enum lossy_compression_schemes compression_level_current_default =
@@ -1147,8 +1159,8 @@ void write_output_distributed(struct engine* e,
     io_write_attribute_ll(h_grp, "TotalNumberOfParticles", N_total[ptype]);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
 
     size_t Nparticles = 0;
     struct part* parts_written = NULL;
@@ -1416,6 +1428,15 @@
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Verify we are not going to crash when writing below */
+    if (num_fields >= io_max_size_output_list)
+      error("Too many fields to write for particle type %d", ptype);
+    for (int i = 0; i < num_fields; ++i) {
+      if (!list[i].is_used) error("List of fields contains an empty entry!");
+      if (!list[i].dimension)
+        error("Dimension of field '%s' is 0!", list[i].name);
+    }
+
     /* Did the user specify a non-standard default for the entire particle
      * type? */
     const enum lossy_compression_schemes compression_level_current_default =
diff --git a/src/parallel_io.c b/src/parallel_io.c
index 97564cecd99038d513ee8bc0e9279f4dfa974ea8..457cdfbf6fd02a7307d65ab34c7fee914fb9a02c 100644
--- a/src/parallel_io.c
+++ b/src/parallel_io.c
@@ -71,6 +71,9 @@
 /* Are we timing the i/o? */
 //#define IO_SPEED_MEASUREMENT
 
+/* Max number of entries that can be written for a given particle type */
+static const int io_max_size_output_list = 100;
+
 /**
  * @brief Reads a chunk of data from an open HDF5 dataset
  *
@@ -997,8 +1000,8 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
     error("Error while opening particle group %s.", partTypeGroupName);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
     size_t Nparticles = 0;
 
     /* Read particle fields into the particle structure */
@@ -1332,8 +1335,8 @@ void prepare_file(struct engine* e, const char* fileName,
     io_write_attribute_ll(h_grp, "TotalNumberOfParticles", N_total[ptype]);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
 
     /* Write particle fields from the particle structure */
     switch (ptype) {
@@ -1374,6 +1377,15 @@
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Verify we are not going to crash when writing below */
+    if (num_fields >= io_max_size_output_list)
+      error("Too many fields to write for particle type %d", ptype);
+    for (int i = 0; i < num_fields; ++i) {
+      if (!list[i].is_used) error("List of fields contains an empty entry!");
+      if (!list[i].dimension)
+        error("Dimension of field '%s' is 0!", list[i].name);
+    }
+
     /* Did the user specify a non-standard default for the entire particle
      * type? */
     const enum lossy_compression_schemes compression_level_current_default =
diff --git a/src/serial_io.c b/src/serial_io.c
index 586c188fea37f7d394e592cdb2ef7e0e485cda1c..f68d32b5ce406b10820b656d3b7998b462dc5880 100644
--- a/src/serial_io.c
+++ b/src/serial_io.c
@@ -63,6 +63,9 @@
 #include "version.h"
 #include "xmf.h"
 
+/* Max number of entries that can be written for a given particle type */
+static const int io_max_size_output_list = 100;
+
 /**
  * @brief Reads a data array from a given HDF5 group.
  *
@@ -794,8 +797,8 @@ void read_ic_serial(char* fileName, const struct unit_system* internal_units,
     error("Error while opening particle group %s.", partTypeGroupName);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
     size_t Nparticles = 0;
 
     /* Read particle fields into the particle structure */
@@ -1363,8 +1366,8 @@ void write_output_serial(struct engine* e,
     error("Error while opening particle group %s.", partTypeGroupName);
 
     int num_fields = 0;
-    struct io_props list[100];
-    bzero(list, 100 * sizeof(struct io_props));
+    struct io_props list[io_max_size_output_list];
+    bzero(list, io_max_size_output_list * sizeof(struct io_props));
 
     size_t Nparticles = 0;
     struct part* parts_written = NULL;
@@ -1637,6 +1640,15 @@
         error("Particle Type %d not yet supported. Aborting", ptype);
     }
 
+    /* Verify we are not going to crash when writing below */
+    if (num_fields >= io_max_size_output_list)
+      error("Too many fields to write for particle type %d", ptype);
+    for (int i = 0; i < num_fields; ++i) {
+      if (!list[i].is_used) error("List of fields contains an empty entry!");
+      if (!list[i].dimension)
+        error("Dimension of field '%s' is 0!", list[i].name);
+    }
+
     /* Did the user specify a non-standard default for the entire particle
      * type? */
     const enum lossy_compression_schemes compression_level_current_default =
diff --git a/src/single_io.c b/src/single_io.c
index 1d38ae5eafe8892ad6f1833881a34067722c2623..8b1892af199931bbf882014c4d35413893a03905 100644
--- a/src/single_io.c
+++ b/src/single_io.c
@@ -64,7 +64,7 @@
 #include "xmf.h"
 
 /* Max number of entries that can be written for a given particle type */
-const int io_max_size_output_list = 100;
+static const int io_max_size_output_list = 100;
 
 /**
  * @brief Reads a data array from a given HDF5 group.