Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
SWIFT
SWIFTsim
Commits
9dbbde7a
Commit
9dbbde7a
authored
Dec 02, 2018
by
Loic Hausammann
Browse files
implement the stars density over mpi
parent
aa863097
Changes
13
Hide whitespace changes
Inline
Side-by-side
examples/main.c
View file @
9dbbde7a
...
...
@@ -456,7 +456,6 @@ int main(int argc, char *argv[]) {
#ifdef WITH_MPI
if
(
with_mpole_reconstruction
&&
nr_nodes
>
1
)
error
(
"Cannot reconstruct m-poles every step over MPI (yet)."
);
if
(
with_feedback
)
error
(
"Can't run with feedback over MPI (yet)."
);
if
(
with_star_formation
)
error
(
"Can't run with star formation over MPI (yet)"
);
if
(
with_limiter
)
error
(
"Can't run with time-step limiter over MPI (yet)"
);
...
...
src/cell.c
View file @
9dbbde7a
...
...
@@ -181,6 +181,7 @@ int cell_pack(struct cell *restrict c, struct pcell *restrict pc,
/* Start by packing the data of the current cell. */
pc
->
hydro
.
h_max
=
c
->
hydro
.
h_max
;
pc
->
stars
.
h_max
=
c
->
stars
.
h_max
;
pc
->
hydro
.
ti_end_min
=
c
->
hydro
.
ti_end_min
;
pc
->
hydro
.
ti_end_max
=
c
->
hydro
.
ti_end_max
;
pc
->
grav
.
ti_end_min
=
c
->
grav
.
ti_end_min
;
...
...
@@ -285,6 +286,7 @@ int cell_unpack(struct pcell *restrict pc, struct cell *restrict c,
/* Unpack the current pcell. */
c
->
hydro
.
h_max
=
pc
->
hydro
.
h_max
;
c
->
stars
.
h_max
=
pc
->
stars
.
h_max
;
c
->
hydro
.
ti_end_min
=
pc
->
hydro
.
ti_end_min
;
c
->
hydro
.
ti_end_max
=
pc
->
hydro
.
ti_end_max
;
c
->
grav
.
ti_end_min
=
pc
->
grav
.
ti_end_min
;
...
...
@@ -1838,6 +1840,9 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
*/
void
cell_activate_stars_sorts
(
struct
cell
*
c
,
int
sid
,
struct
scheduler
*
s
)
{
// TODO Alexei, remove this
if
(
c
->
nodeID
!=
engine_rank
)
return
;
/* Do we need to re-sort? */
if
(
c
->
stars
.
dx_max_sort
>
space_maxreldx
*
c
->
dmin
)
{
...
...
@@ -3144,6 +3149,10 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
struct
cell
*
cj
=
t
->
cj
;
const
int
ci_active
=
cell_is_active_stars
(
ci
,
e
);
const
int
cj_active
=
(
cj
!=
NULL
)
?
cell_is_active_stars
(
cj
,
e
)
:
0
;
#ifdef WITH_MPI
const
int
ci_nodeID
=
ci
->
nodeID
;
const
int
cj_nodeID
=
(
cj
!=
NULL
)
?
cj
->
nodeID
:
-
1
;
#endif
/* Only activate tasks that involve a local active cell. */
if
((
ci_active
&&
ci
->
nodeID
==
nodeID
)
||
...
...
@@ -3161,38 +3170,42 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Set the correct sorting flags and activate hydro drifts */
else
if
(
t
->
type
==
task_type_pair
)
{
/* Do ci */
/* stars for ci */
atomic_or
(
&
ci
->
stars
.
requires_sorts
,
1
<<
t
->
flags
);
ci
->
stars
.
dx_max_sort_old
=
ci
->
stars
.
dx_max_sort
;
/* hydro for cj */
atomic_or
(
&
cj
->
hydro
.
requires_sorts
,
1
<<
t
->
flags
);
cj
->
hydro
.
dx_max_sort_old
=
cj
->
hydro
.
dx_max_sort
;
/* Activate the drift tasks. */
if
(
ci
->
nodeID
==
nodeID
)
cell_activate_drift_spart
(
ci
,
s
);
if
(
cj
->
nodeID
==
nodeID
)
cell_activate_drift_part
(
cj
,
s
);
/* Check the sorts and activate them if needed. */
cell_activate_stars_sorts
(
ci
,
t
->
flags
,
s
);
cell_activate_hydro_sorts
(
cj
,
t
->
flags
,
s
);
if
(
ci_active
&&
cj
->
hydro
.
count
!=
0
&&
ci
->
stars
.
count
!=
0
)
{
/* stars for ci */
atomic_or
(
&
ci
->
stars
.
requires_sorts
,
1
<<
t
->
flags
);
ci
->
stars
.
dx_max_sort_old
=
ci
->
stars
.
dx_max_sort
;
/* hydro for cj */
atomic_or
(
&
cj
->
hydro
.
requires_sorts
,
1
<<
t
->
flags
);
cj
->
hydro
.
dx_max_sort_old
=
cj
->
hydro
.
dx_max_sort
;
/* Activate the drift tasks. */
if
(
ci
->
nodeID
==
nodeID
)
cell_activate_drift_spart
(
ci
,
s
);
if
(
cj
->
nodeID
==
nodeID
)
cell_activate_drift_part
(
cj
,
s
);
/* Check the sorts and activate them if needed. */
cell_activate_stars_sorts
(
ci
,
t
->
flags
,
s
);
cell_activate_hydro_sorts
(
cj
,
t
->
flags
,
s
);
}
/* Do cj */
/* hydro for ci */
atomic_or
(
&
ci
->
hydro
.
requires_sorts
,
1
<<
t
->
flags
);
ci
->
hydro
.
dx_max_sort_old
=
ci
->
hydro
.
dx_max_sort
;
/* stars for cj */
atomic_or
(
&
cj
->
stars
.
requires_sorts
,
1
<<
t
->
flags
);
cj
->
stars
.
dx_max_sort_old
=
cj
->
stars
.
dx_max_sort
;
/* Activate the drift tasks. */
if
(
cj
->
nodeID
==
nodeID
)
cell_activate_drift_spart
(
cj
,
s
);
if
(
ci
->
nodeID
==
nodeID
)
cell_activate_drift_part
(
ci
,
s
);
/* Check the sorts and activate them if needed. */
cell_activate_hydro_sorts
(
ci
,
t
->
flags
,
s
);
cell_activate_stars_sorts
(
cj
,
t
->
flags
,
s
);
if
(
cj_active
&&
ci
->
hydro
.
count
!=
0
&&
cj
->
stars
.
count
!=
0
)
{
/* hydro for ci */
atomic_or
(
&
ci
->
hydro
.
requires_sorts
,
1
<<
t
->
flags
);
ci
->
hydro
.
dx_max_sort_old
=
ci
->
hydro
.
dx_max_sort
;
/* stars for cj */
atomic_or
(
&
cj
->
stars
.
requires_sorts
,
1
<<
t
->
flags
);
cj
->
stars
.
dx_max_sort_old
=
cj
->
stars
.
dx_max_sort
;
/* Activate the drift tasks. */
if
(
cj
->
nodeID
==
nodeID
)
cell_activate_drift_spart
(
cj
,
s
);
if
(
ci
->
nodeID
==
nodeID
)
cell_activate_drift_part
(
ci
,
s
);
/* Check the sorts and activate them if needed. */
cell_activate_hydro_sorts
(
ci
,
t
->
flags
,
s
);
cell_activate_stars_sorts
(
cj
,
t
->
flags
,
s
);
}
}
/* Store current values of dx_max and h_max. */
...
...
@@ -3207,85 +3220,81 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Check whether there was too much particle motion, i.e. the
cell neighbour conditions were violated. */
if
(
cell_need_rebuild_for_stars_pair
(
ci
,
cj
))
rebuild
=
1
;
if
(
cell_need_rebuild_for_hydro_pair
(
ci
,
cj
))
rebuild
=
1
;
#ifdef WITH_MPI
error
(
"MPI with stars not implemented"
);
/* /\* Activate the send/recv tasks. *\/ */
/* if (ci->nodeID != nodeID) { */
/* /\* If the local cell is active, receive data from the foreign cell.
* *\/ */
/* if (cj_active) { */
/* scheduler_activate(s, ci->hydro.recv_xv); */
/* if (ci_active) { */
/* scheduler_activate(s, ci->hydro.recv_rho); */
/* } */
/* } */
/* Activate the send/recv tasks. */
if
(
ci_nodeID
!=
nodeID
)
{
/* /\* If the foreign cell is active, we want its ti_end values. *\/ */
/* if (ci_active) scheduler_activate(s, ci->mpi.recv_ti); */
// TODO Alexei: In this section, you will find some comments that
// are from the hydro code. It should look the same for the feedback.
/* If the local cell is active, receive data from the foreign cell. */
if
(
cj_active
)
{
scheduler_activate
(
s
,
ci
->
mpi
.
hydro
.
recv_xv
);
/* if (ci_active) { */
/* scheduler_activate(s, ci->mpi.hydro.recv_rho); */
/* } */
}
/*
/\* I
s
the foreign cell active
and will need stuff from us?
*\/ */
/*
if (ci_active)
{
*/
/*
/\* I
f
the foreign cell
is
active
, we want its ti_end values.
*\/ */
/*
if (ci_active)
scheduler_activate(s, ci->mpi.recv_ti);
*/
/* scheduler_activate_send(s, cj->hydro.send_xv, ci->nodeID); */
/* Is the foreign cell active and will need stuff from us? */
if
(
ci_active
)
{
/* /\* Drift the cell which will be sent; note that not all sent */
/* particles will be drifted, only those that are needed. *\/ */
/* cell_activate_drift_part(cj, s); */
scheduler_activate_send
(
s
,
cj
->
mpi
.
hydro
.
send_xv
,
ci_nodeID
);
/* /\* If the local cell is also active, more stuff will be needed.
* *\/ */
/* if (cj_active) { */
/* scheduler_activate_send(s, cj->hydro.send_rho, ci->nodeID); */
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
cell_activate_drift_part
(
cj
,
s
);
/* } */
/* } */
/* /\* If the local cell is also active, more stuff will be needed.
* *\/ */
/* if (cj_active) { */
/* scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID); */
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti,
* ci->nodeID);
*/
/* } */
}
/* } else if (cj->nodeID != nodeID) { */
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti,
* ci_nodeID); */
/* /\* If the local cell is active, receive data from the foreign cell.
* *\/ */
/* if (ci_active) { */
/* scheduler_activate(s, cj->hydro.recv_xv); */
/* if (cj_active) { */
/* scheduler_activate(s, cj->hydro.recv_rho); */
}
else
if
(
cj_nodeID
!=
nodeID
)
{
/* } */
/* } */
/* If the local cell is active, receive data from the foreign cell. */
if
(
ci_active
)
{
scheduler_activate
(
s
,
cj
->
mpi
.
hydro
.
recv_xv
);
/* if (cj_active) { */
/* scheduler_activate(s, cj->mpi.hydro.recv_rho); */
/* } */
}
/*
/\* If the foreign cell is active, we want its ti_end values. *\/ */
/*
if (cj_active) scheduler_activate(s, cj->mpi.recv_ti); */
/*
/\* If the foreign cell is active, we want its ti_end values. *\/ */
/*
if (cj_active) scheduler_activate(s, cj->mpi.recv_ti); */
/*
/
\
* Is the foreign cell active and will need stuff from us?
*\/
*/
/*
if (cj_active) {
*/
/* Is the foreign cell active and will need stuff from us? */
if
(
cj_active
)
{
/*
scheduler_activate_send(s, ci->hydro.send_xv, cj
->
nodeID);
*/
scheduler_activate_send
(
s
,
ci
->
mpi
.
hydro
.
send_xv
,
cj
_
nodeID
);
/*
/
\
* Drift the cell which will be sent; note that not all sent
*/
/*
particles will be drifted, only those that are needed.
*\/
*/
/*
cell_activate_drift_part(ci, s);
*/
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
cell_activate_drift_part
(
ci
,
s
);
/*
/\* If the local cell is also active, more stuff will be needed.
* *\/ */
/*
if (ci_active) { */
/*
/\* If the local cell is also active, more stuff will be needed.
* *\/ */
/*
if (ci_active) { */
/*
scheduler_activate_send(s, ci->hydro.send_rho, cj
->
nodeID); */
/*
scheduler_activate_send(s, ci->
mpi.
hydro.send_rho, cj
_
nodeID); */
/*
} */
/* } */
/*
} */
}
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti,
* cj->nodeID);
*/
/* } */
/* /\* If the local cell is active, send its ti_end values. *\/ */
/* if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti,
* cj_nodeID); */
}
#endif
}
}
...
...
src/cell.h
View file @
9dbbde7a
...
...
@@ -992,6 +992,23 @@ cell_need_rebuild_for_hydro_pair(const struct cell *ci, const struct cell *cj) {
ci
->
hydro
.
dx_max_part
+
cj
->
hydro
.
dx_max_part
>
cj
->
dmin
);
}
/**
 * @brief Have star particles in a pair of cells moved too much, requiring a
 * rebuild?
 *
 * @param ci The first #cell.
 * @param cj The second #cell.
 */
__attribute__((always_inline)) INLINE static int
cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {

  /* The pair needs rebuilding when the interaction reach (kernel cut-off
   * times the larger of the two maximal smoothing lengths) plus the
   * accumulated particle drifts exceeds the cell size.
   * Note ci->dmin == cj->dmin */
  const float reach = kernel_gamma * max(ci->stars.h_max, cj->stars.h_max);
  return (reach + ci->stars.dx_max_part + cj->stars.dx_max_part > cj->dmin);
}
/**
* @brief Have star particles in a pair of cells moved too much and require a
...
...
src/debug.c
View file @
9dbbde7a
...
...
@@ -179,6 +179,14 @@ int checkSpacehmax(struct space *s) {
}
}
float
cell_stars_h_max
=
0
.
0
f
;
for
(
int
k
=
0
;
k
<
s
->
nr_cells
;
k
++
)
{
if
(
s
->
cells_top
[
k
].
nodeID
==
s
->
e
->
nodeID
&&
s
->
cells_top
[
k
].
stars
.
h_max
>
cell_stars_h_max
)
{
cell_stars_h_max
=
s
->
cells_top
[
k
].
stars
.
h_max
;
}
}
/* Now all particles. */
float
part_h_max
=
0
.
0
f
;
for
(
size_t
k
=
0
;
k
<
s
->
nr_parts
;
k
++
)
{
...
...
@@ -187,10 +195,21 @@ int checkSpacehmax(struct space *s) {
}
}
/* Now all the sparticles. */
float
spart_h_max
=
0
.
0
f
;
for
(
size_t
k
=
0
;
k
<
s
->
nr_sparts
;
k
++
)
{
if
(
s
->
sparts
[
k
].
h
>
spart_h_max
)
{
spart_h_max
=
s
->
sparts
[
k
].
h
;
}
}
/* If within some epsilon we are OK. */
if
(
fabsf
(
cell_h_max
-
part_h_max
)
<=
FLT_EPSILON
)
return
1
;
if
(
fabsf
(
cell_h_max
-
part_h_max
)
<=
FLT_EPSILON
&&
fabsf
(
cell_stars_h_max
-
spart_h_max
)
<=
FLT_EPSILON
)
return
1
;
/* There is a problem. Hunt it down. */
/* part */
for
(
int
k
=
0
;
k
<
s
->
nr_cells
;
k
++
)
{
if
(
s
->
cells_top
[
k
].
nodeID
==
s
->
e
->
nodeID
)
{
if
(
s
->
cells_top
[
k
].
hydro
.
h_max
>
part_h_max
)
{
...
...
@@ -207,6 +226,23 @@ int checkSpacehmax(struct space *s) {
}
}
/* spart */
for
(
int
k
=
0
;
k
<
s
->
nr_cells
;
k
++
)
{
if
(
s
->
cells_top
[
k
].
nodeID
==
s
->
e
->
nodeID
)
{
if
(
s
->
cells_top
[
k
].
stars
.
h_max
>
spart_h_max
)
{
message
(
"cell %d is inconsistent (%f > %f)"
,
k
,
s
->
cells_top
[
k
].
stars
.
h_max
,
spart_h_max
);
}
}
}
for
(
size_t
k
=
0
;
k
<
s
->
nr_sparts
;
k
++
)
{
if
(
s
->
sparts
[
k
].
h
>
cell_stars_h_max
)
{
message
(
"spart %lld is inconsistent (%f > %f)"
,
s
->
sparts
[
k
].
id
,
s
->
sparts
[
k
].
h
,
cell_stars_h_max
);
}
}
return
0
;
}
...
...
@@ -225,6 +261,8 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
float
h_max
=
0
.
0
f
;
float
dx_max
=
0
.
0
f
;
float
stars_h_max
=
0
.
0
f
;
float
stars_dx_max
=
0
.
0
f
;
int
result
=
1
;
const
double
loc_min
[
3
]
=
{
c
->
loc
[
0
],
c
->
loc
[
1
],
c
->
loc
[
2
]};
...
...
@@ -260,6 +298,33 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
dx_max
=
max
(
dx_max
,
sqrt
(
dx2
));
}
const
size_t
nr_sparts
=
c
->
stars
.
count
;
struct
spart
*
sparts
=
c
->
stars
.
parts
;
for
(
size_t
k
=
0
;
k
<
nr_sparts
;
k
++
)
{
struct
spart
*
const
sp
=
&
sparts
[
k
];
if
(
sp
->
x
[
0
]
<
loc_min
[
0
]
||
sp
->
x
[
0
]
>=
loc_max
[
0
]
||
sp
->
x
[
1
]
<
loc_min
[
1
]
||
sp
->
x
[
1
]
>=
loc_max
[
1
]
||
sp
->
x
[
2
]
<
loc_min
[
2
]
||
sp
->
x
[
2
]
>=
loc_max
[
2
])
{
message
(
"Inconsistent part position p->x=[%e %e %e], c->loc=[%e %e %e] "
"c->width=[%e %e %e]"
,
sp
->
x
[
0
],
sp
->
x
[
1
],
sp
->
x
[
2
],
c
->
loc
[
0
],
c
->
loc
[
1
],
c
->
loc
[
2
],
c
->
width
[
0
],
c
->
width
[
1
],
c
->
width
[
2
]);
result
=
0
;
}
const
float
dx2
=
sp
->
x_diff
[
0
]
*
sp
->
x_diff
[
0
]
+
sp
->
x_diff
[
1
]
*
sp
->
x_diff
[
1
]
+
sp
->
x_diff
[
2
]
*
sp
->
x_diff
[
2
];
stars_h_max
=
max
(
stars_h_max
,
sp
->
h
);
stars_dx_max
=
max
(
stars_dx_max
,
sqrt
(
dx2
));
}
if
(
c
->
split
)
{
for
(
int
k
=
0
;
k
<
8
;
k
++
)
{
if
(
c
->
progeny
[
k
]
!=
NULL
)
{
...
...
@@ -283,6 +348,19 @@ int checkCellhdxmax(const struct cell *c, int *depth) {
result
=
0
;
}
if
(
c
->
stars
.
h_max
!=
stars_h_max
)
{
message
(
"%d Inconsistent stars_h_max: cell %f != parts %f"
,
*
depth
,
c
->
stars
.
h_max
,
stars_h_max
);
message
(
"location: %f %f %f"
,
c
->
loc
[
0
],
c
->
loc
[
1
],
c
->
loc
[
2
]);
result
=
0
;
}
if
(
c
->
stars
.
dx_max_part
!=
stars_dx_max
)
{
message
(
"%d Inconsistent stars_dx_max: %f != %f"
,
*
depth
,
c
->
stars
.
dx_max_part
,
stars_dx_max
);
message
(
"location: %f %f %f"
,
c
->
loc
[
0
],
c
->
loc
[
1
],
c
->
loc
[
2
]);
result
=
0
;
}
return
result
;
}
...
...
src/engine_maketasks.c
View file @
9dbbde7a
...
...
@@ -203,6 +203,86 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
#endif
}
/**
* @brief Add send tasks for the stars pairs to a hierarchy of cells.
*
* @param e The #engine.
* @param ci The sending #cell.
* @param cj Dummy cell containing the nodeID of the receiving node.
* @param t_xv The send_xv #task, if it has already been created.
* @param t_rho The send_rho #task, if it has already been created.
*/
void
engine_addtasks_send_stars
(
struct
engine
*
e
,
struct
cell
*
ci
,
struct
cell
*
cj
,
struct
task
*
t_xv
,
struct
task
*
t_rho
)
{
#ifdef WITH_MPI
struct
link
*
l
=
NULL
;
struct
scheduler
*
s
=
&
e
->
sched
;
const
int
nodeID
=
cj
->
nodeID
;
/* Check if any of the density tasks are for the target node. */
for
(
l
=
ci
->
stars
.
density
;
l
!=
NULL
;
l
=
l
->
next
)
if
(
l
->
t
->
ci
->
nodeID
==
nodeID
||
(
l
->
t
->
cj
!=
NULL
&&
l
->
t
->
cj
->
nodeID
==
nodeID
))
break
;
/* If so, attach send tasks. */
if
(
l
!=
NULL
)
{
/* Get the task if created in hydro part */
struct
link
*
hydro
=
NULL
;
for
(
hydro
=
ci
->
mpi
.
hydro
.
send_xv
;
hydro
!=
NULL
;
hydro
=
hydro
->
next
)
{
if
(
hydro
->
t
->
ci
->
nodeID
==
nodeID
||
(
hydro
->
t
->
cj
!=
NULL
&&
hydro
->
t
->
cj
->
nodeID
==
nodeID
))
{
break
;
}
}
// TODO Alexei: I guess that you can assume that if the send_xv exists,
// send_rho exists too
if
(
t_xv
==
NULL
)
{
/* Already exists, just need to get it */
if
(
hydro
!=
NULL
)
{
// TODO Alexei: set t_feedback
t_xv
=
hydro
->
t
;
/* This task does not exists, need to create it */
}
else
{
// TODO Alexei: create task and do correct unlocks
/* Make sure this cell is tagged. */
cell_ensure_tagged
(
ci
);
/* Create the tasks and their dependencies? */
t_xv
=
scheduler_addtask
(
s
,
task_type_send
,
task_subtype_xv
,
ci
->
mpi
.
tag
,
0
,
ci
,
cj
);
/* Drift before you send */
scheduler_addunlock
(
s
,
ci
->
hydro
.
super
->
hydro
.
drift
,
t_xv
);
}
}
if
(
hydro
==
NULL
)
{
engine_addlink
(
e
,
&
ci
->
mpi
.
hydro
.
send_xv
,
t_xv
);
// TODO Alexei: addlink
/* engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho); */
}
}
/* Recurse? */
if
(
ci
->
split
)
for
(
int
k
=
0
;
k
<
8
;
k
++
)
if
(
ci
->
progeny
[
k
]
!=
NULL
)
engine_addtasks_send_stars
(
e
,
ci
->
progeny
[
k
],
cj
,
t_xv
,
t_rho
);
#else
error
(
"SWIFT was not compiled with MPI support."
);
#endif
}
/**
* @brief Add send tasks for the time-step to a hierarchy of cells.
*
...
...
@@ -348,6 +428,74 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
#endif
}
/**
* @brief Add recv tasks for stars pairs to a hierarchy of cells.
*
* @param e The #engine.
* @param c The foreign #cell.
* @param t_xv The recv_xv #task, if it has already been created.
* @param t_rho The recv_rho #task, if it has already been created.
*/
void
engine_addtasks_recv_stars
(
struct
engine
*
e
,
struct
cell
*
c
,
struct
task
*
t_xv
,
struct
task
*
t_rho
)
{
#ifdef WITH_MPI
struct
scheduler
*
s
=
&
e
->
sched
;
int
new_task
=
0
;
/* Have we reached a level where there are any stars (or hydro) tasks ? */
if
(
t_xv
==
NULL
&&
(
c
->
stars
.
density
!=
NULL
||
c
->
hydro
.
density
!=
NULL
))
{
#ifdef SWIFT_DEBUG_CHECKS
/* Make sure this cell has a valid tag. */
if
(
c
->
mpi
.
tag
<
0
)
error
(
"Trying to receive from untagged cell."
);
#endif // SWIFT_DEBUG_CHECKS
/* Create the tasks. */
if
(
c
->
mpi
.
hydro
.
recv_xv
==
NULL
)
{
new_task
=
1
;
t_xv
=
scheduler_addtask
(
s
,
task_type_recv
,
task_subtype_xv
,
c
->
mpi
.
tag
,
0
,
c
,
NULL
);
// TODO Alexei: create t_feedback task
/* t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho,
* c->mpi.tag, */
/* 0, c, NULL); */
}
else
{
// TODO Alexei: set t_feedback
t_xv
=
c
->
mpi
.
hydro
.
recv_xv
;
}
}
// TODO Alexei: set pointer
c
->
mpi
.
hydro
.
recv_xv
=
t_xv
;
/* c->mpi.hydro.recv_rho = t_rho; */
/* Add dependencies. */
if
(
c
->
hydro
.
sorts
!=
NULL
&&
new_task
)
{
scheduler_addunlock
(
s
,
t_xv
,
c
->
hydro
.
sorts
);
}
// TODO Alexei: You will need to sort the particles after receiving the spart
for
(
struct
link
*
l
=
c
->
stars
.
density
;
l
!=
NULL
;
l
=
l
->
next
)
{
scheduler_addunlock
(
s
,
t_xv
,
l
->
t
);
// TODO Alexei: I guess that you will need to unlock the recv here
/* scheduler_addunlock(s, l->t, t_rho); */
}
// TODO Alexei: unlock feedback task
/* for (struct link *l = c->hydro.force; l != NULL; l = l->next) */
/* scheduler_addunlock(s, t_rho, l->t); */
/* Recurse? */
if
(
c
->
split
)
for
(
int
k
=
0
;
k
<
8
;
k
++
)
if
(
c
->
progeny
[
k
]
!=
NULL
)
engine_addtasks_recv_stars
(
e
,
c
->
progeny
[
k
],
t_xv
,
t_rho
);
#else
error
(
"SWIFT was not compiled with MPI support."
);
#endif
}
/**
* @brief Add recv tasks for gravity pairs to a hierarchy of cells.
*
...
...
@@ -784,12 +932,12 @@ void engine_make_hierarchical_tasks_stars(struct engine *e, struct cell *c) {
/* Are we in a super-cell ? */
if
(
c
->
super
==
c
)
{
/* Add the sort task. */
c
->
stars
.
sorts
=
scheduler_addtask
(
s
,
task_type_stars_sort
,
task_subtype_none
,
0
,
0
,
c
,
NULL
);
/* Local tasks only... */
if
(
c
->
nodeID
==
e
->
nodeID
)
{
// TODO Alexei: do not need to be only on local node with feedback
/* Add the sort task. */
c
->
stars
.
sorts
=
scheduler_addtask
(
s
,
task_type_stars_sort
,
task_subtype_none
,
0
,
0
,
c
,
NULL
);
/* Generate the ghost tasks. */
c
->
stars
.
ghost_in
=
...
...
@@ -1818,6 +1966,9 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
else
if
(
t
->
type
==
task_type_self
&&
t
->
subtype
==
task_subtype_stars_density
)
{
/* Make the self-density tasks depend on the drifts. */
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
/* Make the self-density tasks depend on the drift and gravity drift. */
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
grav
.
drift
,
t
);
...
...
@@ -1844,19 +1995,28 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
/* Make all stars density tasks depend on the hydro drift and sorts,
* gravity drift and star sorts. */
if
(
t
->
ci
->
nodeID
==
engine_rank
)
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
hydro
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
hydro
.
sorts
,
t
);
if
(
t
->
cj
->
nodeID
==
engine_rank
)
scheduler_addunlock
(
sched
,
t
->
cj
->
super
->
grav
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
stars
.
sorts
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
sorts
,
t
);
if
(
t
->
ci
->
super
!=
t
->
cj
->
super
)
{
if
(
t
->
ci
->
nodeID
==
engine_rank
)
{
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
grav
.
drift
,
t
);
// TODO Alexei: the stars in foreign cells need to be sorted before
// the feedback loop and after the ghosts
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
stars
.
sorts
,
t
);
}
if
(
t
->
ci
->
hydro
.
super
!=
t
->
cj
->
hydro
.
super
)
{
if
(
t
->
cj
->
nodeID
==
engine_rank
)
scheduler_addunlock
(
sched
,
t
->
cj
->
super
->
hydro
.
drift
,
t
);