SWIFT / SWIFTsim / Commits

Commit f1619486, authored Jul 05, 2016 by Peter W. Draper

Merge remote-tracking branch 'origin/master' into mpi_skip

Conflicts:
	src/engine.c

parents 4164f127 86851808
33 changed files
README

@@ -26,6 +26,8 @@ Valid options are:
  -f {int} Overwrite the CPU frequency (Hz) to be used for time measurements
  -g Run with an external gravitational potential
  -G Run with self-gravity
+ -n {int} Execute a fixed number of time steps. Defaults to -1, which means
+          use the time_end parameter to stop.
  -s Run with SPH
  -t {int} The number of threads to use on each MPI rank. Defaults to 1 if not specified.
  -v [12] Increase the level of verbosity 1: MPI-rank 0 writes
configure.ac

@@ -72,9 +72,6 @@ if test "$enable_ipo" = "yes"; then
    fi
 fi

-# Add libtool support.
-LT_INIT
-
 # Check for MPI. Need to do this before characterising the compiler (C99 mode),
 # as this changes the compiler.
 # We should consider using AX_PROG_CC_MPI to replace AC_PROG_CC when compiling

@@ -151,6 +148,9 @@ AM_CONDITIONAL([HAVEMPI],[test $enable_mpi = "yes"])
 # Indicate that MPIRUN can be modified by an environement variable
 AC_ARG_VAR(MPIRUN, Path to the mpirun command if non-standard)

+# Add libtool support (now that CC is defined).
+LT_INIT
+
 # Need C99 and inline support.
 AC_PROG_CC_C99
 AC_C_INLINE

@@ -179,7 +179,7 @@ if test "$enable_opt" = "yes" ; then
       ac_test_CFLAGS="yes"
       CFLAGS="$old_CFLAGS $CFLAGS"

-   # Check SSE & AVX support (some overlap with AX_CC_MAXOPT).
+      # Check SSE & AVX support (some overlap with AX_CC_MAXOPT).
       # Don't use the SIMD_FLAGS result with Intel compilers. The -x<code>
       # value from AX_CC_MAXOPT should be sufficient.
       AX_EXT

@@ -287,12 +287,75 @@ AC_SUBST([METIS_LIBS])
 AC_SUBST([METIS_INCS])
 AM_CONDITIONAL([HAVEMETIS],[test -n "$METIS_LIBS"])

 # # Check for zlib.
 # AC_CHECK_LIB([z],[gzopen],[
 #   AC_DEFINE([HAVE_LIBZ],[1],[Set to 1 if zlib is installed.])
 #   LDFLAGS="$LDFLAGS -lz"
 # ],[])

+# Check for tcmalloc a fast malloc that is part of the gperftools.
+have_tcmalloc="no"
+AC_ARG_WITH([tcmalloc],
+   [AS_HELP_STRING([--with-tcmalloc],
+      [use tcmalloc library or specify the directory with lib @<:@yes/no@:>@]
+   )],
+   [with_tcmalloc="$withval"],
+   [with_tcmalloc="no"]
+)
+if test "x$with_tcmalloc" != "xno"; then
+   if test "x$with_tcmalloc" != "xyes" && test "x$with_tcmalloc" != "x"; then
+      tclibs="-L$with_tcmalloc -ltcmalloc"
+   else
+      tclibs="-ltcmalloc"
+   fi
+   AC_CHECK_LIB([tcmalloc],[tc_cfree],[have_tcmalloc="yes"],[have_tcmalloc="no"],
+                $tclibs)
+
+   # Could just have the minimal version.
+   if test "$have_tcmalloc" = "no"; then
+      if test "x$with_tcmalloc" != "xyes" && test "x$with_tcmalloc" != "x"; then
+         tclibs="-L$with_tcmalloc -ltcmalloc_minimal"
+      else
+         tclibs="-ltcmalloc_minimal"
+      fi
+      AC_CHECK_LIB([tcmalloc],[tc_cfree],[have_tcmalloc="yes"],[have_tcmalloc="no"],
+                   $tclibs)
+   fi
+
+   if test "$have_tcmalloc" = "yes"; then
+      TCMALLOC_LIBS="$tclibs"
+
+      # These are recommended for GCC.
+      if test "$ax_cv_c_compiler_vendor" = "gnu"; then
+         CFLAGS="$CFLAGS -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free"
+      fi
+   else
+      TCMALLOC_LIBS=""
+   fi
+fi
+AC_SUBST([TCMALLOC_LIBS])
+AM_CONDITIONAL([HAVETCMALLOC],[test -n "$TCMALLOC_LIBS"])
+
+# Check for -lprofiler usually part of the gpreftools along with tcmalloc.
+have_profiler="no"
+AC_ARG_WITH([profiler],
+   [AS_HELP_STRING([--with-profiler],
+      [use cpu profiler library or specify the directory with lib @<:@yes/no@:>@]
+   )],
+   [with_profiler="$withval"],
+   [with_profiler="yes"]
+)
+if test "x$with_profiler" != "xno"; then
+   if test "x$with_profiler" != "xyes" && test "x$with_profiler" != "x"; then
+      proflibs="-L$with_profiler -lprofiler"
+   else
+      proflibs="-lprofiler"
+   fi
+   AC_CHECK_LIB([profiler],[ProfilerFlush],[have_profiler="yes"],[have_profiler="no"],
+                $proflibs)
+
+   if test "$have_profiler" = "yes"; then
+      PROFILER_LIBS="$proflibs"
+   else
+      PROFILER_LIBS=""
+   fi
+fi
+AC_SUBST([PROFILER_LIBS])
+AM_CONDITIONAL([HAVEPROFILER],[test -n "$PROFILER_LIBS"])
+
 # Check for HDF5. This is required.
 AX_LIB_HDF5

@@ -410,6 +473,8 @@ AC_MSG_RESULT([
      - parallel    : $have_parallel_hdf5
    Metis enabled   : $have_metis
    libNUMA enabled : $have_numa
+   Using tcmalloc  : $have_tcmalloc
+   CPU profiler    : $have_profiler
 ])

 # Generate output.
examples/Makefile.am

@@ -21,13 +21,17 @@ MYFLAGS = -DTIMER
 # Add the source directory and debug to CFLAGS
 AM_CFLAGS = -I../src $(HDF5_CPPFLAGS)

-AM_LDFLAGS =
+AM_LDFLAGS = $(HDF5_LDFLAGS)

 MPI_THREAD_LIBS = @MPI_THREAD_LIBS@

+# Extra libraries.
+EXTRA_LIBS = $(HDF5_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS)
+
 # MPI libraries.
 MPI_LIBS = $(METIS_LIBS) $(MPI_THREAD_LIBS)
 MPI_FLAGS = -DWITH_MPI $(METIS_INCS)

-# Set-up the library
+# Programs.
 bin_PROGRAMS = swift swift_fixdt

 # Build MPI versions as well?

@@ -45,20 +49,20 @@ endif
 # Sources for swift
 swift_SOURCES = main.c
 swift_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) -DENGINE_POLICY="engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_LDADD = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS)
+swift_LDADD = ../src/.libs/libswiftsim.a $(EXTRA_LIBS)

 swift_fixdt_SOURCES = main.c
 swift_fixdt_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) -DENGINE_POLICY="engine_policy_fixdt | engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_fixdt_LDADD = ../src/.libs/libswiftsim.a $(HDF5_LDFLAGS) $(HDF5_LIBS)
+swift_fixdt_LDADD = ../src/.libs/libswiftsim.a $(EXTRA_LIBS)

 # Sources for swift_mpi, do we need an affinity policy for MPI?
 swift_mpi_SOURCES = main.c
 swift_mpi_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) $(MPI_FLAGS) -DENGINE_POLICY="engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(MPI_LIBS)
+swift_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(MPI_LIBS) $(EXTRA_LIBS)

 swift_fixdt_mpi_SOURCES = main.c
 swift_fixdt_mpi_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) $(MPI_FLAGS) -DENGINE_POLICY="engine_policy_fixdt | engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
-swift_fixdt_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(MPI_LIBS)
+swift_fixdt_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(MPI_LIBS) $(EXTRA_LIBS)

 # Scripts to generate ICs
 EXTRA_DIST = UniformBox/makeIC.py UniformBox/run.sh UniformBox/uniformBox.yml \
examples/main.c

@@ -74,6 +74,7 @@ void print_help_message() {
   printf(" %2s %8s %s\n", "-g", "", "Run with an external gravitational potential");
   printf(" %2s %8s %s\n", "-G", "", "Run with self-gravity");
+  printf(" %2s %8s %s\n", "-n", "{int}", "Execute a fixed number of time steps");
   printf(" %2s %8s %s\n", "-s", "", "Run with SPH");
   printf(" %2s %8s %s\n", "-t", "{int}",
          "The number of threads to use on each MPI rank. Defaults to 1 if not "

@@ -138,6 +139,7 @@ int main(int argc, char *argv[]) {
   int with_aff = 0;
   int dry_run = 0;
   int dump_tasks = 0;
+  int nsteps = -1;
   int with_cosmology = 0;
   int with_external_gravity = 0;
   int with_self_gravity = 0;

@@ -150,7 +152,7 @@ int main(int argc, char *argv[]) {
   /* Parse the parameters */
   int c;
-  while ((c = getopt(argc, argv, "acdef:gGhst:v:y:")) != -1) switch (c) {
+  while ((c = getopt(argc, argv, "acdef:gGhn:st:v:y:")) != -1) switch (c) {
     case 'a':
       with_aff = 1;
       break;

@@ -179,6 +181,13 @@ int main(int argc, char *argv[]) {
     case 'h':
       if (myrank == 0) print_help_message();
       return 0;
+    case 'n':
+      if (sscanf(optarg, "%d", &nsteps) != 1) {
+        if (myrank == 0) printf("Error parsing fixed number of steps.\n");
+        if (myrank == 0) print_help_message();
+        return 1;
+      }
+      break;
     case 's':
       with_hydro = 1;
       break;

@@ -322,18 +331,21 @@ int main(int argc, char *argv[]) {
   size_t Ngas = 0, Ngpart = 0;
   double dim[3] = {0., 0., 0.};
   int periodic = 0;
+  int flag_entropy_ICs = 0;
   if (myrank == 0) clocks_gettime(&tic);
 #if defined(WITH_MPI)
 #if defined(HAVE_PARALLEL_HDF5)
   read_ic_parallel(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
-                   myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, dry_run);
+                   &flag_entropy_ICs, myrank, nr_nodes, MPI_COMM_WORLD,
+                   MPI_INFO_NULL, dry_run);
 #else
   read_ic_serial(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
-                 myrank, nr_nodes, MPI_COMM_WORLD, MPI_INFO_NULL, dry_run);
+                 &flag_entropy_ICs, myrank, nr_nodes, MPI_COMM_WORLD,
+                 MPI_INFO_NULL, dry_run);
 #endif
 #else
   read_ic_single(ICfileName, dim, &parts, &gparts, &Ngas, &Ngpart, &periodic,
-                 dry_run);
+                 &flag_entropy_ICs, dry_run);
 #endif
   if (myrank == 0) {
     clocks_gettime(&toc);

@@ -354,7 +366,7 @@ int main(int argc, char *argv[]) {
     free(parts);
     parts = NULL;
     for (size_t k = 0; k < Ngpart; ++k)
-      if (gparts[k].id > 0) error("Linking problem");
+      if (gparts[k].id_or_neg_offset < 0) error("Linking problem");
     Ngas = 0;
   }

@@ -466,7 +478,7 @@ int main(int argc, char *argv[]) {
 #endif

   /* Initialise the particles */
-  engine_init_particles(&e);
+  engine_init_particles(&e, flag_entropy_ICs);

   /* Legend */
   if (myrank == 0)

@@ -474,7 +486,7 @@ int main(int argc, char *argv[]) {
            "Updates", "g-Updates", "Wall-clock time", clocks_getunit());

   /* Main simulation loop */
-  for (int j = 0; !engine_is_done(&e); j++) {
+  for (int j = 0; !engine_is_done(&e) && e.step != nsteps; j++) {

 /* Repartition the space amongst the nodes? */
 #ifdef WITH_MPI
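Note on the new -n handling above: it relies on the default value -1 never matching a step counter that starts at 0. A stand-alone toy sketch of that sentinel-value pattern (illustrative only, not part of the patch; engine_is_done_stub is a made-up placeholder for the real end-of-simulation test):

#include <stdio.h>

/* Toy stand-in for the run loop: nsteps = -1 can never equal a step counter
 * that starts at 0 and only grows, so the loop is bounded by the normal
 * end-of-simulation test instead. */
static int engine_is_done_stub(int step) { return step >= 8; /* pretend time_end reached */ }

int main(void) {
  const int nsteps = -1; /* value used when -n is not given on the command line */
  for (int step = 0; !engine_is_done_stub(step) && step != nsteps; step++)
    printf("running step %d\n", step);
  return 0;
}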
src/Makefile.am

@@ -24,6 +24,13 @@ AM_LDFLAGS = $(HDF5_LDFLAGS) -version-info 0:0:0
 # The git command, if available.
 GIT_CMD = @GIT_CMD@

+# Additional dependencies for shared libraries.
+EXTRA_LIBS = $(HDF5_LIBS) $(PROFILER_LIBS) $(TCMALLOC_LIBS)
+
+# MPI libraries.
+MPI_LIBS = $(METIS_LIBS) $(MPI_THREAD_LIBS)
+MPI_FLAGS = -DWITH_MPI $(METIS_INCS)
+
 # Build the libswiftsim library
 lib_LTLIBRARIES = libswiftsim.la

 # Build a MPI-enabled version too?

@@ -65,11 +72,13 @@ nobase_noinst_HEADERS = approx_math.h atomic.h cycle.h error.h inline.h kernel_h
 # Sources and flags for regular library
 libswiftsim_la_SOURCES = $(AM_SOURCES)
 libswiftsim_la_CFLAGS = $(AM_CFLAGS)
+libswiftsim_la_LDFLAGS = $(AM_LDFLAGS) $(EXTRA_LIBS)

 # Sources and flags for MPI library
 libswiftsim_mpi_la_SOURCES = $(AM_SOURCES)
-libswiftsim_mpi_la_CFLAGS = $(AM_CFLAGS) -DWITH_MPI $(METIS_INCS)
-libswiftsim_mpi_la_LDFLAGS = $(AM_LDFLAGS) -DWITH_MPI $(METIS_LIBS)
+libswiftsim_mpi_la_CFLAGS = $(AM_CFLAGS) $(MPI_FLAGS)
+libswiftsim_mpi_la_LDFLAGS = $(AM_LDFLAGS) $(MPI_LIBS) $(EXTRA_LIBS)
 libswiftsim_mpi_la_SHORTNAME = mpi
src/cell.c

@@ -222,7 +222,7 @@ int cell_pack_ti_ends(struct cell *c, int *ti_ends) {
   /* Pack this cell's data. */
   ti_ends[0] = c->ti_end_min;

   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
   for (int k = 0; k < 8; k++)

@@ -238,7 +238,7 @@ int cell_unpack_ti_ends(struct cell *c, int *ti_ends) {
   /* Unpack this cell's data. */
   c->ti_end_min = ti_ends[0];

   /* Fill in the progeny, depth-first recursion. */
   int count = 1;
   for (int k = 0; k < 8; k++)

@@ -410,9 +410,11 @@ void cell_gunlocktree(struct cell *c) {
 * @brief Sort the parts into eight bins along the given pivots.
 *
 * @param c The #cell array to be sorted.
+ * @param parts_offset Offset of the cell parts array relative to the
+ *        space's parts array, i.e. c->parts - s->parts.
 */
-void cell_split(struct cell *c) {
+void cell_split(struct cell *c, ptrdiff_t parts_offset) {

   int i, j;
   const int count = c->count, gcount = c->gcount;

@@ -530,8 +532,7 @@ void cell_split(struct cell *c) {
   }

   /* Re-link the gparts. */
-  for (int k = 0; k < count; k++)
-    if (parts[k].gpart != NULL) parts[k].gpart->part = &parts[k];
+  part_relink_gparts(parts, count, parts_offset);

 #ifdef SWIFT_DEBUG_CHECKS
   /* Verify that _all_ the parts have been assigned to a cell. */

@@ -626,8 +627,7 @@ void cell_split(struct cell *c) {
   }

   /* Re-link the parts. */
-  for (int k = 0; k < gcount; k++)
-    if (gparts[k].id > 0) gparts[k].part->gpart = &gparts[k];
+  part_relink_parts(gparts, gcount, parts - parts_offset);
 }

 /**
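For readers following the new parts_offset argument: the relinking helpers called above presumably live in src/part.c, which is not part of this diff. A stripped-down sketch of what a helper along the lines of part_relink_gparts could do under the new scheme (the struct definitions here are minimal stand-ins, not the real SWIFT types):

#include <stddef.h>

struct gpart { long long id_or_neg_offset; }; /* stand-in for the real gpart */
struct part  { struct gpart *gpart; };        /* stand-in for the real part  */

/* After the parts of a cell have been sorted or moved, refresh the back-links:
 * a gpart that belongs to a gas particle stores minus that particle's index in
 * the space-wide parts array, hence parts_offset = c->parts - s->parts. */
static void relink_gparts_sketch(struct part *parts, int count,
                                 ptrdiff_t parts_offset) {
  for (int k = 0; k < count; k++)
    if (parts[k].gpart != NULL)
      parts[k].gpart->id_or_neg_offset = -(parts_offset + k);
}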
src/cell.h

@@ -24,6 +24,9 @@
 #define SWIFT_CELL_H

+/* Includes. */
+#include <stddef.h>
+
 /* Local includes. */
 #include "lock.h"
 #include "multipole.h"
 #include "part.h"

@@ -178,7 +181,7 @@ struct cell {
   ((int)(k) + (cdim)[2] * ((int)(j) + (cdim)[1] * (int)(i)))

 /* Function prototypes. */
-void cell_split(struct cell *c);
+void cell_split(struct cell *c, ptrdiff_t parts_offset);
 int cell_locktree(struct cell *c);
 void cell_unlocktree(struct cell *c);
 int cell_glocktree(struct cell *c);
src/common_io.c

@@ -516,12 +516,10 @@ void prepare_dm_gparts(struct gpart* const gparts, size_t Ndm) {
   /* Let's give all these gparts a negative id */
   for (size_t i = 0; i < Ndm; ++i) {

     /* 0 or negative ids are not allowed */
-    if (gparts[i].id <= 0)
-      error("0 or negative ID for DM particle %zd: ID=%lld", i, gparts[i].id);
-
-    gparts[i].id = -gparts[i].id;
+    if (gparts[i].id_or_neg_offset <= 0)
+      error("0 or negative ID for DM particle %zd: ID=%lld", i,
+            gparts[i].id_or_neg_offset);
   }
 }

@@ -555,7 +553,7 @@ void duplicate_hydro_gparts(struct part* const parts,
     gparts[i + Ndm].mass = parts[i].mass;

     /* Link the particles */
-    gparts[i + Ndm].part = &parts[i];
+    gparts[i + Ndm].id_or_neg_offset = -i;
     parts[i].gpart = &gparts[i + Ndm];
   }
 }

@@ -580,9 +578,8 @@ void collect_dm_gparts(const struct gpart* const gparts, size_t Ntot,
      *        gparts[i].part); */

     /* And collect the DM ones */
-    if (gparts[i].id < 0LL) {
-      memcpy(&dmparts[count], &gparts[i], sizeof(struct gpart));
-      dmparts[count].id = -dmparts[count].id;
+    if (gparts[i].id_or_neg_offset > 0) {
+      dmparts[count] = gparts[i];
       count++;
     }
   }
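The changes above all revolve around the new id_or_neg_offset convention: a gpart either carries the (positive) ID of a dark-matter particle, or minus the index of its gas particle in the space-wide parts array. A minimal illustration of how an ID can be recovered either way, mirroring the pattern used in the updated debug.c below (types are stripped-down stand-ins, not the real SWIFT structs):

struct gpart { long long id_or_neg_offset; }; /* stand-in */
struct part  { long long id; };               /* stand-in */

/* Positive value -> dark-matter particle ID; negative value -> index of the
 * backing gas particle in the space-wide parts array. */
static long long gpart_id_sketch(const struct gpart *gp,
                                 const struct part *parts) {
  if (gp->id_or_neg_offset > 0) return gp->id_or_neg_offset; /* dark matter */
  return parts[-gp->id_or_neg_offset].id;                    /* gas particle */
}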
src/debug.c

@@ -59,8 +59,8 @@
 *
 * (Should be used for debugging only as it runs in O(N).)
 */
-void printParticle(struct part *parts, struct xpart *xparts, long long int id,
-                   size_t N) {
+void printParticle(const struct part *parts, struct xpart *xparts,
+                   long long int id, size_t N) {

   int found = 0;

@@ -82,24 +82,27 @@ void printParticle(struct part *parts, struct xpart *xparts, long long int id,
 * the standard output.
 *
 * @param gparts The array of g-particles.
+ * @param parts The array of particles.
 * @param id The id too look for.
 * @param N The size of the array of g-particles.
 *
 * (Should be used for debugging only as it runs in O(N).)
 */
-void printgParticle(struct gpart *gparts, long long int id, size_t N) {
+void printgParticle(const struct gpart *gparts, const struct part *parts,
+                    long long int id, size_t N) {

   int found = 0;

   /* Look for the particle. */
   for (size_t i = 0; i < N; i++)
-    if (gparts[i].id == -id) {
-      printf("## gParticle[%zd] (DM) :\n id=%lld ", i, -gparts[i].id);
+    if (gparts[i].id_or_neg_offset == id) {
+      printf("## gParticle[%zd] (DM) :\n id=%lld", i, id);
       gravity_debug_particle(&gparts[i]);
       found = 1;
       break;
-    } else if (gparts[i].id > 0 && gparts[i].part->id == id) {
-      printf("## gParticle[%zd] (hydro) :\n id=%lld ", i, gparts[i].id);
+    } else if (gparts[i].id_or_neg_offset < 0 &&
+               parts[-gparts[i].id_or_neg_offset].id == id) {
+      printf("## gParticle[%zd] (hydro) :\n id=%lld", i, id);
       gravity_debug_particle(&gparts[i]);
       found = 1;
       break;

@@ -114,9 +117,9 @@ void printgParticle(struct gpart *gparts, long long int id, size_t N) {
 * @param p The particle to print
 * @param xp The extended data ot the particle to print
 */
-void printParticle_single(struct part *p, struct xpart *xp) {
+void printParticle_single(const struct part *p, const struct xpart *xp) {

-  printf("## Particle: id=%lld ", p->id);
+  printf("## Particle: id=%lld", p->id);
   hydro_debug_particle(p, xp);
   printf("\n");
 }

@@ -128,7 +131,7 @@ void printParticle_single(struct part *p, struct xpart *xp) {
 */
 void printgParticle_single(struct gpart *gp) {

-  printf("## g-Particle: id=%lld ", gp->id);
+  printf("## g-Particle: id=%lld ", gp->id_or_neg_offset);
   gravity_debug_particle(gp);
   printf("\n");
 }
src/debug.h

@@ -19,14 +19,15 @@
 #ifndef SWIFT_DEBUG_H
 #define SWIFT_DEBUG_H

-struct part;
-struct gpart;
-struct xpart;
+/* Includes. */
+#include "cell.h"
+#include "part.h"

-void printParticle(struct part *parts, struct xpart *xparts, long long int id,
-                   size_t N);
-void printgParticle(struct gpart *parts, long long int id, size_t N);
-void printParticle_single(struct part *p, struct xpart *xp);
+void printParticle(const struct part *parts, struct xpart *xparts,
+                   long long int id, size_t N);
+void printgParticle(const struct gpart *gparts, const struct part *parts,
+                    long long int id, size_t N);
+void printParticle_single(const struct part *p, const struct xpart *xp);

 void printgParticle_single(struct gpart *gp);

 #ifdef HAVE_METIS
src/engine.c
@@ -102,8 +102,8 @@ struct link *engine_addlink(struct engine *e, struct link *l, struct task *t) {
}
/**
* @brief Generate the hierarchical tasks for a hierarchy of cells -
i.e. all
* the O(Npart) tasks.
* @brief Generate the
gravity
hierarchical tasks for a hierarchy of cells -
*
i.e. all
the O(Npart) tasks.
*
* Tasks are only created here. The dependencies will be added later on.
*
...
...
@@ -111,8 +111,8 @@ struct link *engine_addlink(struct engine *e, struct link *l, struct task *t) {
* @param c The #cell.
* @param super The super #cell.
*/
void
engine_make_hierarchical_tasks
(
struct
engine
*
e
,
struct
cell
*
c
,
struct
cell
*
super
)
{
void
engine_make_
gravity_
hierarchical_tasks
(
struct
engine
*
e
,
struct
cell
*
c
,
struct
cell
*
super
)
{
struct
scheduler
*
s
=
&
e
->
sched
;
const
int
is_with_external_gravity
=
...
...
@@ -132,36 +132,91 @@ void engine_make_hierarchical_tasks(struct engine *e, struct cell *c,
     if (c->nodeID == e->nodeID) {

       /* Add the init task. */
-      c->init = scheduler_addtask(s, task_type_init, task_subtype_none, 0, 0,
-                                  c, NULL, 0);
+      if (c->init == NULL)
+        c->init = scheduler_addtask(s, task_type_init, task_subtype_none, 0, 0,
+                                    c, NULL, 0);

       /* Add the drift task. */
-      c->drift = scheduler_addtask(s, task_type_drift, task_subtype_none, 0, 0,
-                                   c, NULL, 0);
+      if (c->drift == NULL)
+        c->drift = scheduler_addtask(s, task_type_drift, task_subtype_none, 0,
+                                     0, c, NULL, 0);

       /* Add the kick task that matches the policy. */
       if (is_fixdt) {
-        c->kick = scheduler_addtask(s, task_type_kick_fixdt, task_subtype_none,
-                                    0, 0, c, NULL, 0);
+        if (c->kick == NULL)
+          c->kick = scheduler_addtask(s, task_type_kick_fixdt,
+                                      task_subtype_none, 0, 0, c, NULL, 0);
       } else {
-        c->kick = scheduler_addtask(s, task_type_kick, task_subtype_none, 0, 0,
-                                    c, NULL, 0);
+        if (c->kick == NULL)
+          c->kick = scheduler_addtask(s, task_type_kick, task_subtype_none, 0,
+                                      0, c, NULL, 0);
       }

-      if (c->count > 0) {
-
-        if (is_with_external_gravity)
-          c->grav_external = scheduler_addtask(
-              s, task_type_grav_external, task_subtype_none, 0, 0, c, NULL, 0);
-      }
-
-      /* Generate the ghost task. */
-      c->ghost = scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0,
-                                   c, NULL, 0);
+      if (c->gcount > 0) {
+
+        /* Add the external gravity tasks */
+        if (is_with_external_gravity)
+          c->grav_external = scheduler_addtask(
+              s, task_type_grav_external, task_subtype_none, 0, 0, c, NULL, 0);
+      }
     }
   }
+
+  /* Set the super-cell. */
+  c->super = super;
+
+  /* Recurse. */
+  if (c->split)
+    for (int k = 0; k < 8; k++)
+      if (c->progeny[k] != NULL)
+        engine_make_gravity_hierarchical_tasks(e, c->progeny[k], super);
+}
+
+/**
+ * @brief Generate the hydro hierarchical tasks for a hierarchy of cells -
+ * i.e. all the O(Npart) tasks.
+ *
+ * Tasks are only created here. The dependencies will be added later on.
+ *
+ * @param e The #engine.
+ * @param c The #cell.
+ * @param super The super #cell.
+ */
+void engine_make_hydro_hierarchical_tasks(struct engine *e, struct cell *c,
+                                          struct cell *super) {
+
+  struct scheduler *s = &e->sched;
+  const int is_fixdt = (e->policy & engine_policy_fixdt) == engine_policy_fixdt;
+
+  /* Is this the super-cell? */
+  if (super == NULL && (c->density != NULL || (c->count > 0 && !c->split))) {
+
+    /* This is the super cell, i.e. the first with density tasks attached. */
+    super = c;
+
+    /* Local tasks only... */
+    if (c->nodeID == e->nodeID) {
+
+      /* Add the init task. */
+      if (c->init == NULL)
+        c->init = scheduler_addtask(s, task_type_init, task_subtype_none, 0, 0,
+                                    c, NULL, 0);
+
+      /* Add the drift task. */
+      if (c->drift == NULL)
+        c->drift = scheduler_addtask(s, task_type_drift, task_subtype_none, 0,
+                                     0, c, NULL, 0);
+
+      /* Add the kick task that matches the policy. */
+      if (is_fixdt) {
+        if (c->kick == NULL)
+          c->kick = scheduler_addtask(s, task_type_kick_fixdt,
+                                      task_subtype_none, 0, 0, c, NULL, 0);
+      } else {
+        if (c->kick == NULL)
+          c->kick = scheduler_addtask(s, task_type_kick, task_subtype_none, 0,
+                                      0, c, NULL, 0);
+      }
+
+      /* Generate the ghost task. */
+      c->ghost = scheduler_addtask(s, task_type_ghost, task_subtype_none, 0, 0,
+                                   c, NULL, 0);
+    }
+  }
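A side effect of the new "only create if still NULL" guards above is that task creation becomes idempotent, so, as far as one can tell from this hunk, the gravity and hydro passes can visit the same cell without producing duplicate init/drift/kick tasks. A self-contained toy illustration of that guard pattern (not SWIFT code; names are made up):

#include <stdio.h>
#include <stdlib.h>

struct task { int type; };

/* Create the task only if the slot is still empty: a second pass over the
 * same slot becomes a no-op instead of a duplicate. */
static struct task *add_task_once(struct task **slot, int type) {
  if (*slot == NULL) {
    *slot = malloc(sizeof(**slot));
    (*slot)->type = type;
  }
  return *slot;
}

int main(void) {
  struct task *init = NULL;
  add_task_once(&init, 1); /* e.g. gravity pass */
  add_task_once(&init, 1); /* e.g. hydro pass: reuses the existing task */
  printf("task created once: %p\n", (void *)init);
  free(init);
  return 0;
}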
@@ -172,7 +227,7 @@ void engine_make_hierarchical_tasks(struct engine *e, struct cell *c,
   if (c->split)
     for (int k = 0; k < 8; k++)
       if (c->progeny[k] != NULL)
-        engine_make_hierarchical_tasks(e, c->progeny[k], super);
+        engine_make_hydro_hierarchical_tasks(e, c->progeny[k], super);
 }

 /**
@@ -270,11 +325,11 @@ void engine_redistribute(struct engine *e) {
     }

 #ifdef SWIFT_DEBUG_CHECKS
     if (s->parts[k].gpart->id < 0)