Commit 25a1f9fa authored by Matthieu Schaller's avatar Matthieu Schaller
Browse files

Merge branch 'io_chunk_sizes' into 'master'

Check if compression is turned on before deciding on the IO chunk size

See merge request !1107
parents 9aed5974 729bbd31
...@@ -109,6 +109,9 @@ void write_distributed_array(const struct engine* e, hid_t grp, ...@@ -109,6 +109,9 @@ void write_distributed_array(const struct engine* e, hid_t grp,
if (h_space < 0) if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name); error("Error while creating data space for field '%s'.", props.name);
/* Decide what chunk size to use based on compression */
int log2_chunk_size = e->snapshot_compression > 0 ? 12 : 18;
int rank; int rank;
hsize_t shape[2]; hsize_t shape[2];
hsize_t chunk_shape[2]; hsize_t chunk_shape[2];
...@@ -117,13 +120,13 @@ void write_distributed_array(const struct engine* e, hid_t grp, ...@@ -117,13 +120,13 @@ void write_distributed_array(const struct engine* e, hid_t grp,
rank = 2; rank = 2;
shape[0] = N; shape[0] = N;
shape[1] = props.dimension; shape[1] = props.dimension;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = props.dimension; chunk_shape[1] = props.dimension;
} else { } else {
rank = 1; rank = 1;
shape[0] = N; shape[0] = N;
shape[1] = 0; shape[1] = 0;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = 0; chunk_shape[1] = 0;
} }
......
...@@ -286,6 +286,9 @@ void write_los_hdf5_dataset(const struct io_props props, const size_t N, ...@@ -286,6 +286,9 @@ void write_los_hdf5_dataset(const struct io_props props, const size_t N,
if (h_space < 0) if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name); error("Error while creating data space for field '%s'.", props.name);
/* Decide what chunk size to use based on compression */
int log2_chunk_size = e->snapshot_compression > 0 ? 12 : 18;
int rank = 0; int rank = 0;
hsize_t shape[2]; hsize_t shape[2];
hsize_t chunk_shape[2]; hsize_t chunk_shape[2];
...@@ -293,13 +296,13 @@ void write_los_hdf5_dataset(const struct io_props props, const size_t N, ...@@ -293,13 +296,13 @@ void write_los_hdf5_dataset(const struct io_props props, const size_t N,
rank = 2; rank = 2;
shape[0] = N; shape[0] = N;
shape[1] = props.dimension; shape[1] = props.dimension;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = props.dimension; chunk_shape[1] = props.dimension;
} else { } else {
rank = 1; rank = 1;
shape[0] = N; shape[0] = N;
shape[1] = 0; shape[1] = 0;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = 0; chunk_shape[1] = 0;
} }
......
...@@ -250,6 +250,9 @@ void prepare_array_serial(const struct engine* e, hid_t grp, char* fileName, ...@@ -250,6 +250,9 @@ void prepare_array_serial(const struct engine* e, hid_t grp, char* fileName,
if (h_space < 0) if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name); error("Error while creating data space for field '%s'.", props.name);
/* Decide what chunk size to use based on compression */
int log2_chunk_size = e->snapshot_compression > 0 ? 12 : 18;
int rank = 0; int rank = 0;
hsize_t shape[2]; hsize_t shape[2];
hsize_t chunk_shape[2]; hsize_t chunk_shape[2];
...@@ -257,13 +260,13 @@ void prepare_array_serial(const struct engine* e, hid_t grp, char* fileName, ...@@ -257,13 +260,13 @@ void prepare_array_serial(const struct engine* e, hid_t grp, char* fileName,
rank = 2; rank = 2;
shape[0] = N_total; shape[0] = N_total;
shape[1] = props.dimension; shape[1] = props.dimension;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = props.dimension; chunk_shape[1] = props.dimension;
} else { } else {
rank = 1; rank = 1;
shape[0] = N_total; shape[0] = N_total;
shape[1] = 0; shape[1] = 0;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = 0; chunk_shape[1] = 0;
} }
......
...@@ -255,6 +255,9 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName, ...@@ -255,6 +255,9 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
if (h_space < 0) if (h_space < 0)
error("Error while creating data space for field '%s'.", props.name); error("Error while creating data space for field '%s'.", props.name);
/* Decide what chunk size to use based on compression */
int log2_chunk_size = e->snapshot_compression > 0 ? 12 : 18;
int rank; int rank;
hsize_t shape[2]; hsize_t shape[2];
hsize_t chunk_shape[2]; hsize_t chunk_shape[2];
...@@ -263,13 +266,13 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName, ...@@ -263,13 +266,13 @@ void write_array_single(const struct engine* e, hid_t grp, char* fileName,
rank = 2; rank = 2;
shape[0] = N; shape[0] = N;
shape[1] = props.dimension; shape[1] = props.dimension;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = props.dimension; chunk_shape[1] = props.dimension;
} else { } else {
rank = 1; rank = 1;
shape[0] = N; shape[0] = N;
shape[1] = 0; shape[1] = 0;
chunk_shape[0] = 1 << 20; /* Just a guess...*/ chunk_shape[0] = 1 << log2_chunk_size;
chunk_shape[1] = 0; chunk_shape[1] = 0;
} }
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment