SWIFT / SWIFTsim

Commit 86d98e91
Authored 6 years ago by Matthieu Schaller
Committed 6 years ago by Matthieu Schaller
Correct calculation of the offsets in MPI-land. Use in-place reductions.
Parent: 20fc8a25
Merge request: !710 (Snapshot offsets)
Showing 2 changed files with 101 additions and 33 deletions:

  src/common_io.c    (+88, −25)
  src/parallel_io.c  (+13, −8)
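For readers unfamiliar with the pattern named in the commit message: an in-place MPI reduction lets the root rank reuse its input array as the output array by passing MPI_IN_PLACE, so no separate receive buffer has to be allocated. The sketch below is illustrative only (not repository code; the array size and values are made up):

#include <mpi.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int n = 8;                                /* illustrative size */
  long long *values = calloc(n, sizeof(long long));
  values[0] = rank + 1;                           /* each rank contributes */

  if (rank == 0) {
    /* Root passes MPI_IN_PLACE: the result overwrites `values` directly. */
    MPI_Reduce(MPI_IN_PLACE, values, n, MPI_LONG_LONG_INT, MPI_SUM, 0,
               MPI_COMM_WORLD);
  } else {
    /* Non-root ranks send their buffer; no receive buffer is needed. */
    MPI_Reduce(values, NULL, n, MPI_LONG_LONG_INT, MPI_SUM, 0, MPI_COMM_WORLD);
  }

  free(values);
  MPI_Finalize();
  return 0;
}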
src/common_io.c (+88, −25)
@@ -401,6 +401,9 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
  message("global offsets=%lld", global_offsets[0]);
  int *node = NULL;
  node = malloc(nr_cells * sizeof(int));

  /* Count of particles in each cell */
  long long *count_part = NULL, *count_gpart = NULL, *count_spart = NULL;
  count_part = malloc(nr_cells * sizeof(long long));
@@ -420,6 +423,9 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
  /* Collect the cell information of *local* cells */
  int count_local_cells = 0;
  long long local_offset_part = 0;
  long long local_offset_gpart = 0;
  long long local_offset_spart = 0;
  for (int i = 0; i < nr_cells; ++i) {

    if (cells_top[i].nodeID == nodeID) {
@@ -440,16 +446,16 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
      /* Offsets including the global offset of all particles on this MPI rank
       */
      if (i > 0) {
        offset_part[i] = offset_part[i - 1] + count_part[i - 1] +
                         global_offsets[swift_type_gas];
        offset_gpart[i] = offset_gpart[i - 1] + count_gpart[i - 1] +
                          global_offsets[swift_type_dark_matter];
        offset_spart[i] = offset_spart[i - 1] + count_spart[i - 1] +
                          global_offsets[swift_type_stars];
      }
      offset_part[i] = local_offset_part + global_offsets[swift_type_gas];
      offset_gpart[i] = local_offset_gpart + global_offsets[swift_type_dark_matter];
      offset_spart[i] = local_offset_spart + global_offsets[swift_type_stars];

      ++count_local_cells;
      local_offset_part += count_part[i];
      local_offset_gpart += count_gpart[i];
      local_offset_spart += count_spart[i];

    } else {

      /* Just zero everything for the foregin cells */
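This hunk is the heart of the "correct calculation of the offsets" part of the commit: rather than deriving each cell's offset from the previous cell's offset and count (which re-adds the rank's global offset at every step and breaks once zeroed foreign cells sit in between), the code keeps one running counter per particle type over the local cells only and adds the rank's global offset once. A condensed restatement of that logic for a single particle type, paraphrased from the hunk rather than copied from the file:

/* Condensed sketch of the running-offset logic for one particle type
 * (identifiers follow the hunk above; this is a paraphrase, not the file). */
long long running = 0;
for (int i = 0; i < nr_cells; ++i) {
  if (cells_top[i].nodeID == nodeID) {
    /* Particles written before this cell: all previous ranks
     * (global_offsets) plus earlier local cells (running). */
    offset_part[i] = running + global_offsets[swift_type_gas];
    running += count_part[i];
  } else {
    /* Foreign cell: leave zero; its owner fills it in before the
     * MPI reduction below. */
    offset_part[i] = 0;
  }
}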
@@ -466,27 +472,66 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
      offset_gpart[i] = 0;
      offset_spart[i] = 0;
    }

    node[i] = cells_top[i].nodeID;
  }

#ifdef WITH_MPI
  /* Now, reduce all the arrays. Note that we use a bit-by-bit OR here. This
     is safe as we made sure only local cells have non-zero values. */
#ifdef WITH_MPI
  MPI_Allreduce(MPI_IN_PLACE, count_part, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
                MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, count_gpart, nr_cells, MPI_LONG_LONG_INT,
                MPI_BOR, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, count_spart, nr_cells, MPI_LONG_LONG_INT,
                MPI_BOR, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, offset_part, nr_cells, MPI_LONG_LONG_INT,
                MPI_BOR, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, offset_gpart, nr_cells, MPI_LONG_LONG_INT,
                MPI_BOR, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, offset_spart, nr_cells, MPI_LONG_LONG_INT,
                MPI_BOR, MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, centres, 3 * nr_cells, MPI_DOUBLE, MPI_BOR,
                MPI_COMM_WORLD);

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, count_part, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
               0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(count_part, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, count_gpart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
               0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(count_gpart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, count_spart, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
               0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(count_spart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, offset_part, nr_cells, MPI_LONG_LONG_INT, MPI_BOR,
               0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(offset_part, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, offset_gpart, nr_cells, MPI_LONG_LONG_INT,
               MPI_BOR, 0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(offset_gpart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, offset_spart, nr_cells, MPI_LONG_LONG_INT,
               MPI_BOR, 0, MPI_COMM_WORLD);
  } else {
    MPI_Reduce(offset_spart, NULL, nr_cells, MPI_LONG_LONG_INT, MPI_BOR, 0,
               MPI_COMM_WORLD);
  }

  /* For the centres we use a sum as MPI does not like bitwise operations
     on floating point numbers */
  if (nodeID == 0) {
    MPI_Reduce(MPI_IN_PLACE, centres, 3 * nr_cells, MPI_DOUBLE, MPI_SUM, 0,
               MPI_COMM_WORLD);
  } else {
    MPI_Reduce(centres, NULL, 3 * nr_cells, MPI_DOUBLE, MPI_SUM, 0,
               MPI_COMM_WORLD);
  }
#endif

  /* Only rank 0 actually writes */
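Two details of this hunk are worth spelling out. First, the bit-by-bit OR reduction works as a gather because every cell's count and offset are non-zero on at most one rank (its owner) and zero everywhere else, so OR-ing across ranks reassembles the full arrays without double counting. Second, MPI does not define MPI_BOR for floating-point types, which is why the cell centres are combined with MPI_SUM instead. A toy illustration of the OR-gather with made-up values (not repository code; it assumes MPI has been initialised and <mpi.h>/<stdlib.h> are included):

int rank, nranks;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &nranks);

/* Each rank owns exactly one slot of a shared array; slots it does not
 * own stay zero, so OR-ing over ranks rebuilds the whole array on rank 0. */
long long *owned = calloc(nranks, sizeof(long long));
owned[rank] = 100 + rank;

if (rank == 0) {
  MPI_Reduce(MPI_IN_PLACE, owned, nranks, MPI_LONG_LONG_INT, MPI_BOR, 0,
             MPI_COMM_WORLD);
} else {
  MPI_Reduce(owned, NULL, nranks, MPI_LONG_LONG_INT, MPI_BOR, 0,
             MPI_COMM_WORLD);
}
/* On rank 0, owned[r] now equals 100 + r for every rank r. */
free(owned);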
@@ -517,6 +562,24 @@ void io_write_cell_offsets(hid_t h_grp, const int cdim[3],
  H5Sclose(h_space);
  free(centres);

  /* Write the nodeIDs to the group */
  shape[0] = nr_cells;
  shape[1] = 1;
  h_space = H5Screate(H5S_SIMPLE);
  if (h_space < 0) error("Error while creating data space for cell centres");
  h_err = H5Sset_extent_simple(h_space, 1, shape, shape);
  if (h_err < 0) error("Error while changing shape of gas offsets data space.");
  h_data = H5Dcreate(h_grp, "Nodes", io_hdf5_type(INT), h_space, H5P_DEFAULT,
                     H5P_DEFAULT, H5P_DEFAULT);
  if (h_data < 0) error("Error while creating dataspace for gas offsets.");
  h_err = H5Dwrite(h_data, io_hdf5_type(INT), h_space, H5S_ALL, H5P_DEFAULT,
                   node);
  if (h_err < 0) error("Error while writing centres.");
  H5Dclose(h_data);
  H5Sclose(h_space);
  free(node);

  /* Group containing the offsets for each particle type */
  h_subgrp = H5Gcreate(h_grp, "Offsets", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
  if (h_subgrp < 0) error("Error while creating offsets sub-group");
src/parallel_io.c (+13, −8)
@@ -1282,19 +1282,24 @@ void write_output_parallel(struct engine* e, const char* baseName,
  snprintf(fileName, FILENAME_BUFFER_SIZE, "%s_%04i.hdf5", baseName,
           e->snapshot_output_count);

  if (nodeID == 0) {
    h_file = H5Fopen(fileName, H5F_ACC_RDWR, H5P_DEFAULT);
    if (h_file < 0)
  hid_t h_file_cells, h_grp_cells;
  if (e->nodeID == 0) {
    h_file_cells = H5Fopen(fileName, H5F_ACC_RDWR, H5P_DEFAULT);
    if (h_file_cells < 0)
      error("Error while opening file '%s' on rank %d.", fileName, mpi_rank);
    h_grp_cells = H5Gcreate(h_file_cells, "/Cells", H5P_DEFAULT, H5P_DEFAULT,
                            H5P_DEFAULT);
    if (h_grp_cells < 0) error("Error while creating cells group");
  } else {
    h_file = 0;
    h_file_cells = 0;
  }

  io_write_cell_offsets(h_file, e->s->cdim, e->s->cells_top, e->s->nr_cells,
                        e->s->width, e->nodeID, N_total, offset);
  io_write_cell_offsets(h_grp_cells, e->s->cdim, e->s->cells_top,
                        e->s->nr_cells, e->s->width, mpi_rank, N_total, offset);

  if (nodeID == 0) {
    H5Fclose(h_file);
  if (e->nodeID == 0) {
    H5Gclose(h_grp_cells);
    H5Fclose(h_file_cells);
  }

  /* Prepare some file-access properties */
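The change here separates the handle used for the cell metadata from the one used for the main parallel write: rank 0 alone re-opens the snapshot serially, creates a dedicated /Cells group, and closes both the group and the file afterwards, while the other ranks carry a zero handle but still enter io_write_cell_offsets so its collective reductions match up across ranks. A minimal sketch of that pattern, with illustrative names (my_rank, write_cells_metadata are not from the file) and assuming <hdf5.h> plus an initialised MPI environment:

/* Sketch: serial re-open on rank 0 only; every rank still makes the call
 * that contains the collective MPI operations. Names are illustrative. */
hid_t h_file_cells = 0, h_grp_cells = 0;
if (my_rank == 0) {
  h_file_cells = H5Fopen("snapshot_0000.hdf5", H5F_ACC_RDWR, H5P_DEFAULT);
  if (h_file_cells < 0) { /* handle the error */ }
  h_grp_cells = H5Gcreate(h_file_cells, "/Cells", H5P_DEFAULT, H5P_DEFAULT,
                          H5P_DEFAULT);
}

/* Called on all ranks, but only rank 0 holds a valid group id. */
write_cells_metadata(h_grp_cells, my_rank /*, ... */);

if (my_rank == 0) {
  H5Gclose(h_grp_cells);
  H5Fclose(h_file_cells);
}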