SWIFT / SWIFTsim · Commits

Commit 8a35b6f7
authored 7 years ago by Matthieu Schaller

Move the parallel i/o speed measurement code from comments to #ifdef sections.

parent 937e029b
1 merge request: !460 "Improvements to i/o and parallel-i/o"

Showing 1 changed file: src/parallel_io.c, with 40 additions and 25 deletions

@@ -51,6 +51,9 @@
 #include "units.h"
 #include "xmf.h"
 
+/* Are we timing the i/o? */
+//#define IO_SPEED_MEASUREMENT
+
 /* The current limit of ROMIO (the underlying MPI-IO layer) is 2GB */
 #define HDF5_PARALLEL_IO_MAX_BYTES 2000000000LL
 
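
As context for the define above: since ROMIO cannot address more than 2GB in a single operation, each rank's data has to be written in pieces no larger than HDF5_PARALLEL_IO_MAX_BYTES. A minimal sketch of that splitting, with a hypothetical do_write callback standing in for the actual HDF5 call (this is not SWIFT's writeArray_chunk):

#define HDF5_PARALLEL_IO_MAX_BYTES 2000000000LL

/* Hypothetical helper: issue a sequence of writes, each kept below the
 * ROMIO 2GB limit, via a caller-supplied do_write callback. */
static void write_in_chunks(const char* buf, long long n_bytes,
                            void (*do_write)(const char*, long long)) {
  long long offset = 0;
  while (offset < n_bytes) {
    long long chunk = n_bytes - offset;
    if (chunk > HDF5_PARALLEL_IO_MAX_BYTES) chunk = HDF5_PARALLEL_IO_MAX_BYTES;
    do_write(buf + offset, chunk); /* one sub-2GB write */
    offset += chunk;
  }
}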

@@ -266,16 +269,20 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
                   num_elements * typeSize) != 0)
     error("Unable to allocate temporary i/o buffer");
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* ticks tic = getticks(); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  ticks tic = getticks();
+#endif
 
   /* Copy the particle data to the temporary buffer */
   io_copy_temp_buffer(temp, e, props, N, internal_units, snapshot_units);
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* if(engine_rank == 0) */
-  /* message( "Copying for '%s' took %.3f %s." , props.name, */
-  /* clocks_from_ticks(getticks() - tic), clocks_getunit()); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("Copying for '%s' took %.3f %s.", props.name,
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
 
   /* Create data space */
   const hid_t h_memspace = H5Screate(H5S_SIMPLE);
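
For context, a standalone sketch of the measurement pattern this hunk compiles in, using plain MPI_Wtime() in place of SWIFT's getticks(), clocks_from_ticks() and message() helpers (timed_phase is a hypothetical name): the leading barrier makes all ranks start together, and the trailing barrier charges the phase with the slowest rank's time before rank 0 reports.

#include <mpi.h>
#include <stdio.h>

/* Illustrative stand-in: bracket a collective phase with barriers so
 * every rank measures the same interval, then report once from rank 0. */
static void timed_phase(void (*phase)(void), const char* label) {
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  MPI_Barrier(MPI_COMM_WORLD); /* all ranks enter the phase together */
  const double tic = MPI_Wtime();

  phase(); /* the work being timed, e.g. filling the i/o buffer */

  MPI_Barrier(MPI_COMM_WORLD); /* wait for the slowest rank */
  if (rank == 0)
    printf("%s took %.3f ms.\n", label, (MPI_Wtime() - tic) * 1000.);
}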

@@ -318,12 +325,13 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
   }
 
-  /* message("Writing %lld '%s', %zd elements = %zd bytes (int=%d) at offset
-   * %zd", */
-  /* N, props.name, N * props.dimension, N * props.dimension * typeSize, */
-  /* (int)(N * props.dimension * typeSize), offset); */
+  /* message("Writing %lld '%s', %zd elements = %zd bytes (int=%d) at offset
+   * %zd", N, props.name, N * props.dimension, N * props.dimension * typeSize, */
+  /* (int)(N * props.dimension * typeSize), offset); */
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* tic = getticks(); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  tic = getticks();
+#endif
 
   /* Write temporary buffer to HDF5 dataspace */
   h_err = H5Dwrite(h_data, io_hdf5_type(props.type), h_memspace, h_filespace,

@@ -332,15 +340,17 @@ void writeArray_chunk(struct engine* e, hid_t h_data, hid_t h_plist_id,
     error("Error while writing data array '%s'.", props.name);
   }
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* ticks toc = getticks(); */
-  /* float ms = clocks_from_ticks(toc - tic); */
-  /* int megaBytes = N * props.dimension * typeSize / (1024 * 1024); */
-  /* int total = 0; */
-  /* MPI_Reduce(&megaBytes, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); */
-  /* if (engine_rank == 0) */
-  /* message("H5Dwrite for '%s' (%d MB) took %.3f %s (speed = %f MB/s).", */
-  /* props.name, total, ms, clocks_getunit(), total / (ms / 1000.)); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  ticks toc = getticks();
+  float ms = clocks_from_ticks(toc - tic);
+  int megaBytes = N * props.dimension * typeSize / (1024 * 1024);
+  int total = 0;
+  MPI_Reduce(&megaBytes, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("H5Dwrite for '%s' (%d MB) took %.3f %s (speed = %f MB/s).",
+            props.name, total, ms, clocks_getunit(), total / (ms / 1000.));
+#endif
 
   /* Free and close everything */
   free(temp);
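
The throughput report above sums per-rank data volumes before dividing by the barrier-synchronised wall time. A minimal sketch of that reduction (names and the millisecond unit of elapsed_ms are illustrative assumptions):

#include <mpi.h>
#include <stdio.h>

/* Illustrative reduction: gather the total megabytes written across
 * all ranks onto rank 0, then report the aggregate speed. */
static void report_speed(int rank_megabytes, double elapsed_ms) {
  int rank, total = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Reduce(&rank_megabytes, &total, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
  if (rank == 0)
    printf("Wrote %d MB at %f MB/s.\n", total, total / (elapsed_ms / 1000.));
}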

@@ -372,7 +382,10 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
                 const struct unit_system* snapshot_units) {
 
   const size_t typeSize = io_sizeof_type(props.type);
 
-  /* const ticks tic = getticks(); */
+#ifdef IO_SPEED_MEASUREMENT
+  const ticks tic = getticks();
+#endif
 
   /* Work out properties of the array in the file */
   int rank;

@@ -461,7 +474,7 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
     MPI_Allreduce(MPI_IN_PLACE, &redo, 1, MPI_SIGNED_CHAR, MPI_MAX,
                   MPI_COMM_WORLD);
 
-    if (redo /* && e->verbose */ && mpi_rank == 0)
+    if (redo && e->verbose && mpi_rank == 0)
       message("Need to redo one iteration for array '%s'", props.name);
   }
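
For context, the in-place MPI_MAX all-reduce above is what lets every rank agree on whether another write iteration is needed. A minimal standalone version (the function name is hypothetical):

#include <mpi.h>

/* Illustrative agreement step: the all-reduce returns 1 on every rank
 * if any rank set its local flag, so all ranks loop or exit together. */
static int any_rank_needs_redo(signed char local_redo) {
  MPI_Allreduce(MPI_IN_PLACE, &local_redo, 1, MPI_SIGNED_CHAR, MPI_MAX,
                MPI_COMM_WORLD);
  return local_redo;
}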

@@ -487,10 +500,12 @@ void writeArray(struct engine* e, hid_t grp, char* fileName, FILE* xmfFile,
   H5Dclose(h_data);
   H5Pclose(h_plist_id);
 
-  /* MPI_Barrier(MPI_COMM_WORLD); */
-  /* if(engine_rank == 0) */
-  /* message( "'%s' took %.3f %s." , props.name, */
-  /* clocks_from_ticks(getticks() - tic), clocks_getunit()); */
+#ifdef IO_SPEED_MEASUREMENT
+  MPI_Barrier(MPI_COMM_WORLD);
+  if (engine_rank == 0)
+    message("'%s' took %.3f %s.", props.name,
+            clocks_from_ticks(getticks() - tic), clocks_getunit());
+#endif
 }
 
 /**