SWIFT / SWIFTsim · Commits · 52bbd903

Commit 52bbd903, authored Dec 21, 2018 by Loic Hausammann

Stars: Feedback and density work over MPI

Parent: fdbe16b3
Changes: 16
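In outline (6 of the 16 changed files are shown below): the single star sort task is split into local and foreign variants, a dedicated spart send/recv task pair is added under mpi.stars, and the new tasks are wired into the dependency graph so that the stars density and feedback loops can run across ranks. A rough map of the resulting ordering, inferred from the hunks below rather than stated by the commit itself:

    /* Star feedback over MPI (sketch inferred from the diffs below):
     *
     *   sending rank                        receiving rank
     *   ------------                        --------------
     *   drift -----------> send_xv  ~MPI~>  recv_xv --> stars density tasks
     *   stars ghost_out -> send     ~MPI~>  recv -----> stars.sorts_foreign
     *     (spart data)                      recv -----> stars feedback tasks
     *                                       feedback -> ti_end recv
     *
     * Locally owned cells carry a stars.sorts_local task; cells owned by
     * another rank carry stars.sorts_foreign, which can only run once the
     * sparts have arrived.
     */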
src/cell.c
@@ -2004,11 +2004,16 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
   if (c == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->stars.sorts == NULL)
+    if (c->stars.sorts_local == NULL && c->stars.sorts_foreign == NULL)
       error("Trying to activate un-existing c->stars.sorts");
 #endif
-    scheduler_activate(s, c->stars.sorts);
-    if (c->nodeID == engine_rank) {
+    if (c->stars.sorts_local) {
+      scheduler_activate(s, c->stars.sorts_local);
+    }
+    if (c->stars.sorts_foreign) {
+      scheduler_activate(s, c->stars.sorts_foreign);
+    }
+    if (c->stars.sorts_local) {
       // MATTHIEU: to do: do we actually need both drifts here?
       cell_activate_drift_part(c, s);
       cell_activate_drift_spart(c, s);
@@ -2021,11 +2026,17 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
       parent->stars.do_sub_sort = 1;
       if (parent == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->stars.sorts == NULL)
+        if (parent->stars.sorts_local == NULL &&
+            parent->stars.sorts_foreign == NULL)
           error("Trying to activate un-existing parents->stars.sorts");
 #endif
-        scheduler_activate(s, parent->stars.sorts);
-        if (parent->nodeID == engine_rank) {
+        if (parent->stars.sorts_local) {
+          scheduler_activate(s, parent->stars.sorts_local);
+        }
+        if (parent->stars.sorts_foreign) {
+          scheduler_activate(s, parent->stars.sorts_foreign);
+        }
+        if (parent->stars.sorts_local) {
           cell_activate_drift_part(parent, s);
           cell_activate_drift_spart(parent, s);
         }
@@ -2040,9 +2051,6 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
  */
 void cell_activate_stars_sorts(struct cell *c, int sid, struct scheduler *s) {
-  // TODO Alexei, remove this
-  if (c->nodeID != engine_rank) return;
-
   /* Do we need to re-sort? */
   if (c->stars.dx_max_sort > space_maxreldx * c->dmin) {
@@ -3426,18 +3434,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
     /* Activate the send/recv tasks. */
     if (ci_nodeID != nodeID) {
-      // TODO Alexei: In this section, you will find some comments that
-      // are from the hydro code. It should look the same for the feedback.
       /* If the local cell is active, receive data from the foreign cell. */
       if (cj_active) {
         scheduler_activate(s, ci->mpi.hydro.recv_xv);
-        /* if (ci_active) { */
-        /*   scheduler_activate(s, ci->mpi.hydro.recv_rho); */
-        /* } */
+        if (ci_active) {
+          scheduler_activate(s, ci->mpi.stars.recv);
+        }
       }
-      /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-      /* if (ci_active) scheduler_activate(s, ci->mpi.recv_ti); */
+      /* If the foreign cell is active, we want its ti_end values. */
+      if (ci_active) scheduler_activate(s, ci->mpi.recv_ti);
       /* Is the foreign cell active and will need stuff from us? */
       if (ci_active) {
@@ -3448,30 +3453,28 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
            particles will be drifted, only those that are needed. */
         cell_activate_drift_part(cj, s);
-        /* /\* If the local cell is also active, more stuff will be needed. *\/ */
-        /* if (cj_active) { */
-        /*   scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID); */
-        /* } */
+        /* If the local cell is also active, more stuff will be needed. */
+        if (cj_active) {
+          scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+        }
       }
-      /* /\* If the local cell is active, send its ti_end values. *\/ */
-      /* if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti,
-       *                                        ci_nodeID); */
+      /* If the local cell is active, send its ti_end values. */
+      if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
     } else if (cj_nodeID != nodeID) {
       /* If the local cell is active, receive data from the foreign cell. */
       if (ci_active) {
         scheduler_activate(s, cj->mpi.hydro.recv_xv);
-        /* if (cj_active) { */
-        /*   scheduler_activate(s, cj->mpi.hydro.recv_rho); */
-        /* } */
+        if (cj_active) {
+          scheduler_activate(s, cj->mpi.stars.recv);
+        }
       }
-      /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-      /* if (cj_active) scheduler_activate(s, cj->mpi.recv_ti); */
+      /* If the foreign cell is active, we want its ti_end values. */
+      if (cj_active) scheduler_activate(s, cj->mpi.recv_ti);
       /* Is the foreign cell active and will need stuff from us? */
       if (cj_active) {
@@ -3482,18 +3485,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
            particles will be drifted, only those that are needed. */
         cell_activate_drift_part(ci, s);
-        /* /\* If the local cell is also active, more stuff will be needed. *\/ */
-        /* if (ci_active) { */
-        /*   scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID); */
-        /* } */
+        /* If the local cell is also active, more stuff will be needed. */
+        if (ci_active) {
+          scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+        }
       }
-      /* /\* If the local cell is active, send its ti_end values. *\/ */
-      /* if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti,
-       *                                        cj_nodeID); */
+      /* If the local cell is active, send its ti_end values. */
+      if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
     }
 #endif
 }
src/cell.h
@@ -481,8 +481,11 @@ struct cell {
       /*! Linked list of the tasks computing this cell's star feedback. */
       struct link *feedback;
-      /*! The task computing this cell's sorts. */
-      struct task *sorts;
+      /*! The task computing this cell's sorts before the density. */
+      struct task *sorts_local;
+
+      /*! The task computing this cell's sorts before the feedback. */
+      struct task *sorts_foreign;
       /*! Max smoothing length in this cell. */
       double h_max;
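The split mirrors where the data comes from: sparts owned by this rank can be sorted as soon as the local drifts are done, while sparts owned by another rank can only be sorted after the MPI receive completes (see the unlock of sorts_foreign by the spart recv task in src/engine_maketasks.c below). A hypothetical stand-alone rendering of the activation guard used with the new pair, with stub types in place of SWIFT's real ones:

    #include <stddef.h>

    /* Stub stand-ins for SWIFT's types, for illustration only. */
    struct task { int skip; };
    struct scheduler { int unused; };

    /* Stand-in for SWIFT's scheduler_activate(): mark a task runnable. */
    static void scheduler_activate(struct scheduler *s, struct task *t) {
      (void)s;
      t->skip = 0;
    }

    /* Per-cell star data: at most one of the two sort tasks exists,
     * depending on which rank owns the cell. */
    struct cell_stars {
      struct task *sorts_local;   /* cell owned by this rank */
      struct task *sorts_foreign; /* cell owned by another rank */
    };

    /* Activate whichever sort task the cell actually has, mirroring
     * cell_activate_stars_sorts_up() in the first hunk above. */
    static void activate_star_sorts(struct cell_stars *stars,
                                    struct scheduler *s) {
      if (stars->sorts_local != NULL)
        scheduler_activate(s, stars->sorts_local);
      if (stars->sorts_foreign != NULL)
        scheduler_activate(s, stars->sorts_foreign);
    }

    int main(void) {
      struct task local = {1};
      struct scheduler s = {0};
      struct cell_stars cs = {&local, NULL}; /* a locally owned cell */
      activate_star_sorts(&cs, &s);
      return local.skip; /* 0: the local sort was activated */
    }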
@@ -580,11 +583,18 @@ struct cell {
     } grav;
     struct {
       /* Task receiving spart data. */
       struct task *recv;
-      /* Task receiving gpart data. */
+      /* Linked list for sending spart data. */
       struct link *send;
     } stars;
     struct {
       /* Task receiving limiter data. */
       struct task *recv;
-      /* Linked list for sending gpart data. */
+      /* Linked list for sending limiter data. */
       struct link *send;
     } limiter;
@@ -1014,25 +1024,6 @@ cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
           cj->dmin);
 }
-/**
- * @brief Have star particles in a pair of cells moved too much and require a
- * rebuild ?
- *
- * @param ci The first #cell.
- * @param cj The second #cell.
- */
-__attribute__((always_inline)) INLINE static int
-cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
-
-  /* Is the cut-off radius plus the max distance the parts in both cells have */
-  /* moved larger than the cell size ? */
-  /* Note ci->dmin == cj->dmin */
-  return (kernel_gamma * max(ci->stars.h_max, cj->stars.h_max) +
-              ci->stars.dx_max_part + cj->stars.dx_max_part >
-          cj->dmin);
-}
 /**
  * @brief Add a unique tag to a cell, mostly for MPI communications.
  *
src/engine_maketasks.c
@@ -210,11 +210,11 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
  * @param ci The sending #cell.
  * @param cj Dummy cell containing the nodeID of the receiving node.
  * @param t_xv The send_xv #task, if it has already been created.
- * @param t_rho The send_rho #task, if it has already been created.
+ * @param t_feed The send_feed #task, if it has already been created.
  */
 void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                 struct cell *cj, struct task *t_xv,
-                                struct task *t_rho) {
+                                struct task *t_feed) {
 #ifdef WITH_MPI
   struct link *l = NULL;
@@ -238,24 +238,17 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
     }
   }
-  // TODO Alexei: I guess that you can assume that if the send_xv exists,
-  // send_rho exists too
   if (t_xv == NULL) {
+    /* Make sure this cell is tagged. */
+    cell_ensure_tagged(ci);
+
     /* Already exists, just need to get it */
     if (hydro != NULL) {
-      // TODO Alexei: set t_feedback
       t_xv = hydro->t;
       /* This task does not exists, need to create it */
     } else {
-      // TODO Alexei: create task and do correct unlocks
-      /* Make sure this cell is tagged. */
-      cell_ensure_tagged(ci);
       /* Create the tasks and their dependencies? */
       t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv,
                                ci->mpi.tag, 0, ci, cj);
@@ -263,20 +256,26 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
       /* Drift before you send */
       scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
     }
+
+    /* Create the tasks and their dependencies? */
+    t_feed = scheduler_addtask(s, task_type_send, task_subtype_spart,
+                               ci->mpi.tag, 0, ci, cj);
+
+    /* Ghost before you send */
+    scheduler_addunlock(s, ci->super->stars.ghost_out, t_feed);
   }
 
   if (hydro == NULL) {
     engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
-    // TODO Alexei: addlink
-    /* engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho); */
   }
+  engine_addlink(e, &ci->mpi.stars.send, t_feed);
 }
 
   /* Recurse? */
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_rho);
+        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
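All of the ordering here is expressed through scheduler_addunlock(s, a, b), which makes task b wait for task a: the hunk above makes the hydro drift a prerequisite of send_xv and the stars ghost_out a prerequisite of the new spart send. The following toy model is not SWIFT's scheduler, only an illustration of those unlock semantics; it shows how such wait counts release dependent tasks:

    /* Toy model of scheduler_addunlock()/task release. Compile and run:
     *   cc -o demo demo.c && ./demo */
    #include <stdio.h>

    #define MAX_UNLOCKS 4

    struct toy_task {
      const char *name;
      int wait;                              /* unfinished parents */
      int num_unlocks;
      struct toy_task *unlocks[MAX_UNLOCKS]; /* children released when done */
    };

    /* Equivalent of scheduler_addunlock(): parent must run before child. */
    static void toy_addunlock(struct toy_task *parent, struct toy_task *child) {
      parent->unlocks[parent->num_unlocks++] = child;
      child->wait++;
    }

    /* Run a task, then release its children, recursing when one is ready. */
    static void toy_run(struct toy_task *t) {
      printf("running %s\n", t->name);
      for (int k = 0; k < t->num_unlocks; k++)
        if (--t->unlocks[k]->wait == 0) toy_run(t->unlocks[k]);
    }

    int main(void) {
      struct toy_task drift = {"drift", 0, 0, {0}};
      struct toy_task ghost_out = {"stars ghost_out", 0, 0, {0}};
      struct toy_task send_xv = {"send_xv", 0, 0, {0}};
      struct toy_task send_feed = {"send_feed (spart)", 0, 0, {0}};

      /* The two unlocks added in the hunk above: drift before send_xv,
       * the stars ghost before the spart send. */
      toy_addunlock(&drift, &send_xv);
      toy_addunlock(&ghost_out, &send_feed);

      toy_run(&drift);
      toy_run(&ghost_out);
      return 0;
    }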
@@ -316,6 +315,12 @@ void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
         (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
       break;
 
+  if (l == NULL)
+    for (l = ci->stars.density; l != NULL; l = l->next)
+      if (l->t->ci->nodeID == nodeID ||
+          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+        break;
+
   /* If found anything, attach send tasks. */
   if (l != NULL) {
@@ -434,10 +439,10 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
  * @param e The #engine.
  * @param c The foreign #cell.
  * @param t_xv The recv_xv #task, if it has already been created.
- * @param t_rho The recv_rho #task, if it has already been created.
+ * @param t_feed The recv_feed #task, if it has already been created.
  */
 void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
-                                struct task *t_xv, struct task *t_rho) {
+                                struct task *t_xv, struct task *t_feed) {
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
@@ -456,40 +461,38 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
     new_task = 1;
     t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag,
                              0, c, NULL);
-    // TODO Alexei: create t_feedback task
-    /* t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho,
-     *                           c->mpi.tag, 0, c, NULL); */
   } else {
-    // TODO Alexei: set t_feedback
     t_xv = c->mpi.hydro.recv_xv;
   }
 
+  t_feed = scheduler_addtask(s, task_type_recv, task_subtype_spart,
+                             c->mpi.tag, 0, c, NULL);
+
+  /* Need to sort task before feedback loop */
+  scheduler_addunlock(s, t_feed, c->super->stars.sorts_foreign);
   }
 
-  // TODO Alexei: set pointer
   c->mpi.hydro.recv_xv = t_xv;
-  /* c->mpi.hydro.recv_rho = t_rho; */
+  c->mpi.stars.recv = t_feed;
 
   /* Add dependencies. */
   if (c->hydro.sorts != NULL && new_task) {
     scheduler_addunlock(s, t_xv, c->hydro.sorts);
   }
 
-  // TODO Alexei: You will need to sort the particles after receiving the spart
   for (struct link *l = c->stars.density; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_xv, l->t);
-    // TODO Alexei: I guess that you will need to unlock the recv here
-    /* scheduler_addunlock(s, l->t, t_rho); */
+    scheduler_addunlock(s, l->t, t_feed);
   }
 
-  // TODO Alexei: unlock feedback task
-  /* for (struct link *l = c->hydro.force; l != NULL; l = l->next) */
-  /*   scheduler_addunlock(s, t_rho, l->t); */
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_feed, l->t);
+  }
 
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
       if (c->progeny[k] != NULL)
-        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_rho);
+        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
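Read together, the unlocks added above give the receive side a fixed pipeline: particle positions (recv_xv) arrive first and feed the star density tasks, every density task must finish before the spart payload (t_feed) is accepted, and t_feed then gates both the foreign sort and the feedback tasks. As a comment sketch (the feedback-to-t_ti link is added in the next hunk):

    /* Receive-side ordering, inferred from the unlocks above:
     *
     *   recv_xv -> stars.density tasks -> t_feed (spart recv)
     *   t_feed  -> stars.sorts_foreign
     *   t_feed  -> stars.feedback tasks -> t_ti (next hunk)
     */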
@@ -594,6 +597,9 @@ void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
     }
   }
 
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next)
+    scheduler_addunlock(s, l->t, t_ti);
+
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
@@ -931,13 +937,16 @@ void engine_make_hierarchical_tasks_stars(struct engine *e, struct cell *c) {
   /* Are we in a super-cell ? */
   if (c->super == c) {
 
+    /* Foreign tasks only */
+    if (c->nodeID != e->nodeID) {
+      c->stars.sorts_foreign = scheduler_addtask(
+          s, task_type_stars_sort_foreign, task_subtype_none, 0, 0, c, NULL);
+    }
+
     /* Local tasks only... */
     if (c->nodeID == e->nodeID) {
 
-      // TODO Alexei: do not need to be only on local node with feedback
       /* Add the sort task. */
-      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
-                                         task_subtype_none, 0, 0, c, NULL);
+      c->stars.sorts_local = scheduler_addtask(
+          s, task_type_stars_sort_local, task_subtype_none, 0, 0, c, NULL);
 
       /* Generate the ghost tasks. */
       c->stars.ghost_in =
@@ -1202,11 +1211,19 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
   }
 
   /* Link stars sort tasks to all the higher sort task. */
-  if (t_type == task_type_stars_sort) {
+  if (t_type == task_type_stars_sort_local) {
     for (struct cell *finger = t->ci->parent; finger != NULL;
-         finger = finger->parent)
-      if (finger->stars.sorts != NULL)
-        scheduler_addunlock(sched, t, finger->stars.sorts);
+         finger = finger->parent) {
+      if (finger->stars.sorts_local != NULL)
+        scheduler_addunlock(sched, t, finger->stars.sorts_local);
+    }
+  }
+
+  if (t_type == task_type_stars_sort_foreign) {
+    for (struct cell *finger = t->ci->parent; finger != NULL;
+         finger = finger->parent) {
+      if (finger->stars.sorts_foreign != NULL)
+        scheduler_addunlock(sched, t, finger->stars.sorts_foreign);
+    }
   }
 
   /* Link self tasks to cells. */
@@ -1959,7 +1976,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
     struct task *t = &((struct task *)map_data)[ind];
 
     /* Sort tasks depend on the drift and gravity drift of the cell. */
-    if (t->type == task_type_stars_sort && t->ci->nodeID == engine_rank) {
+    if (t->type == task_type_stars_sort_local) {
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
     }
@@ -1968,9 +1985,6 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
else
if
(
t
->
type
==
task_type_self
&&
t
->
subtype
==
task_subtype_stars_density
)
{
/* Make the self-density tasks depend on the drifts. */
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
/* Make the self-density tasks depend on the drift and gravity drift. */
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
grav
.
drift
,
t
);
...
...
@@ -2002,9 +2016,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->nodeID == engine_rank) {
         scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-        // TODO Alexei: the stars in foreign cells need to be sorted before
-        // the feedback loop and after the ghosts
-        scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
       }
 
       if (t->ci->hydro.super != t->cj->hydro.super) {
@@ -2016,8 +2028,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->super != t->cj->super) {
         if (t->cj->nodeID == engine_rank) {
           scheduler_addunlock(sched, t->cj->super->grav.drift, t);
-          // TODO Alexei: same here, sort before feedback
-          scheduler_addunlock(sched, t->cj->super->stars.sorts, t);
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_local, t);
         }
       }
@@ -2026,6 +2037,16 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
           scheduler_addtask(sched, task_type_pair, task_subtype_stars_feedback,
                             0, 0, t->ci, t->cj);
 
+      /* Add sort before feedback loop */
+      if (t->ci->nodeID != engine_rank) {
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_foreign, t2);
+      }
+      if (t->ci->super != t->cj->super) {
+        if (t->cj->nodeID != engine_rank) {
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_foreign, t2);
+        }
+      }
+
       /* Add the link between the new loop and both cells */
       engine_addlink(e, &t->ci->stars.feedback, t2);
       engine_addlink(e, &t->cj->stars.feedback, t2);
@@ -2036,10 +2057,10 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
         scheduler_addunlock(sched, t2, t->ci->super->end_force);
       }
       if (t->cj->nodeID == nodeID) {
-        if (t->ci->super != t->cj->super)
+        if (t->ci->super != t->cj->super) {
           engine_make_stars_loops_dependencies(sched, t, t2, t->cj);
-        if (t->ci->super != t->cj->super)
           scheduler_addunlock(sched, t2, t->cj->super->end_force);
+        }
       }
     }
@@ -2052,7 +2073,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
hydro
.
super
->
hydro
.
sorts
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
grav
.
drift
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
stars
.
sorts
,
t
);
scheduler_addunlock
(
sched
,
t
->
ci
->
super
->
stars
.
sorts
_local
,
t
);
/* Start by constructing the task for the second stars loop */
struct
task
*
t2
=
scheduler_addtask
(
sched
,
task_type_sub_self
,
...
...
@@ -2081,8 +2102,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->cj->nodeID == engine_rank) {
         scheduler_addunlock(sched, t->cj->super->grav.drift, t);
-        // TODO Alexei: Still the same
-        scheduler_addunlock(sched, t->cj->super->stars.sorts, t);
+        scheduler_addunlock(sched, t->cj->super->stars.sorts_local, t);
       }
 
       if (t->ci->hydro.super != t->cj->hydro.super) {
@@ -2094,8 +2114,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->super != t->cj->super) {
         if (t->ci->nodeID == engine_rank) {
           scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-          // TODO Alexei: still the same
-          scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+          scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
         }
       }
@@ -2104,6 +2123,16 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
                               task_subtype_stars_feedback, t->flags, 0,
                               t->ci, t->cj);
 
+      /* Add the sort before feedback */
+      if (t->cj->nodeID != engine_rank) {
+        scheduler_addunlock(sched, t->cj->super->stars.sorts_foreign, t2);
+      }
+      if (t->ci->super != t->cj->super) {
+        if (t->ci->nodeID != engine_rank) {
+          scheduler_addunlock(sched, t->ci->super->stars.sorts_foreign, t2);
+        }
+      }
+
       /* Add the link between the new loop and both cells */
       engine_addlink(e, &t->ci->stars.feedback, t2);
       engine_addlink(e, &t->cj->stars.feedback, t2);
src/engine_marktasks.c
@@ -71,19 +71,15 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
     /* Activate the send/recv tasks. */
     if (ci_nodeID != nodeID) {
-      // TODO Alexei: here I think you will just need to uncomment the code
-      // and modify it from hydro to stars (this is almost just a copy from the
-      // hydro)
       /* If the local cell is active, receive data from the foreign cell. */
       if (cj_active_stars) {
         scheduler_activate(s, ci->mpi.hydro.recv_xv);
-        /* if (ci_active_hydro) { */
-        /*   scheduler_activate(s, ci->mpi.hydro.recv_rho); */
-        /* } */
+        if (ci_active_stars) {
+          scheduler_activate(s, ci->mpi.stars.recv);
+        }
       }
 
       /* If the foreign cell is active, we want its ti_end values. */
-      /* if (ci_active_stars) scheduler_activate(s, ci->mpi.recv_ti); */
+      if (ci_active_stars) scheduler_activate(s, ci->mpi.recv_ti);
 
       /* Is the foreign cell active and will need stuff from us? */
       if (ci_active_stars) {
@@ -97,29 +93,28 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
           cell_activate_drift_part(l->t->ci, s);
 
           /* If the local cell is also active, more stuff will be needed. */
-          /* if (cj_active_hydro) { */
-          /*   scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
-           */
-          /* } */
+          if (cj_active_stars) {
+            scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+          }
       }
 
       /* If the local cell is active, send its ti_end values. */
-      /* if (cj_active_hydro) */
-      /*   scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID); */
+      if (cj_active_stars) {
+        scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
+      }
 
     } else if (cj_nodeID != nodeID) {
 
       /* If the local cell is active, receive data from the foreign cell. */
       if (ci_active_stars) {
         scheduler_activate(s, cj->mpi.hydro.recv_xv);
-        /* if (cj_active_hydro) { */
-        /*   scheduler_activate(s, cj->mpi.hydro.recv_rho); */
-        /* } */
+        if (cj_active_stars) {
+          scheduler_activate(s, cj->mpi.stars.recv);
+        }
       }
 
       /* If the foreign cell is active, we want its ti_end values. */
-      /* if (cj_active_hydro) scheduler_activate(s, cj->mpi.recv_ti); */
+      if (cj_active_stars) scheduler_activate(s, cj->mpi.recv_ti);
 
       /* Is the foreign cell active and will need stuff from us? */
       if (cj_active_stars) {
@@ -133,17 +128,15 @@ void engine_activate_stars_mpi(struct engine *e, struct scheduler *s,
           cell_activate_drift_part(l->t->ci, s);
 
           /* If the local cell is also active, more stuff will be needed. */
-          /* if (ci_active_hydro) { */
-          /*   scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
-           */
-          /* } */
+          if (ci_active_stars) {
+            scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+          }
       }
 
       /* If the local cell is active, send its ti_end values. */
-      /* if (ci_active_hydro) */
-      /*   scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID); */
+      if (ci_active_stars) {
+        scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
+      }
     }
   }
 #endif
src/error.h
@@ -54,7 +54,7 @@ extern int engine_rank;
     fprintf(stderr, "[%04i] %s %s:%s():%i: " s "\n", engine_rank,          \
             clocks_get_timesincestart(), __FILE__, __FUNCTION__, __LINE__, \
             ##__VA_ARGS__);                                                \
-    MPI_Abort(MPI_COMM_WORLD, -1);                                         \
+    swift_abort(1);                                                        \
   })
 #else
 #define error(s, ...) \
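The MPI build of error() previously called MPI_Abort() inline; routing it through swift_abort() centralises the shutdown path. The body of swift_abort() is not shown in this commit, so the following is only a hypothetical sketch of what a wrapper of this kind typically does; the name example_abort and its behaviour are assumptions, not SWIFT's implementation:

    #include <stdio.h>
    #include <stdlib.h>
    #ifdef WITH_MPI
    #include <mpi.h>
    #endif

    /* Hypothetical central abort helper; the real swift_abort() may differ. */
    static void example_abort(int errcode) {
      fflush(stdout);
      fflush(stderr); /* make sure the error message above is visible */
    #ifdef WITH_MPI
      /* Bring down every rank, not just the one that hit the error. */
      MPI_Abort(MPI_COMM_WORLD, errcode);
    #endif
      abort();
    }

    int main(void) {
      example_abort(1); /* deliberately terminates the program */
    }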
src/runner.c
@@ -209,6 +209,7 @@ void runner_do_stars_ghost(struct runner *r, struct cell *c, int timer) {
         /* Skip if h is already h_max and we don't have enough neighbours */
+        if ((sp->h >= stars_h_max) && (f < 0.f)) {
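The new guard short-circuits the smoothing-length iteration in the stars ghost: once a particle's h is pinned at stars_h_max and the residual f is still negative, no admissible h can add neighbours, so the particle is accepted as-is. The definition of f is not part of this hunk; reading it as the residual of the neighbour-count equation (negative meaning too few neighbours) is an inference. A toy rendering of the guard's logic:

    #include <stdio.h>

    /* Return 0 when iterating on h can no longer help: h is already at the
     * ceiling but the residual still asks for more neighbours. */
    static int keep_iterating(float h, float h_max, float f) {
      if (h >= h_max && f < 0.f) return 0; /* pinned at the ceiling: give up */
      return 1;
    }

    int main(void) {
      printf("%d\n", keep_iterating(2.0f, 2.0f, -0.5f)); /* prints 0 */
      printf("%d\n", keep_iterating(1.0f, 2.0f, -0.5f)); /* prints 1 */
      return 0;
    }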