Commit 52bbd903 authored by Loic Hausammann

Stars: Feedback and density work over MPI

parent fdbe16b3
@@ -2004,11 +2004,16 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
   if (c == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-    if (c->stars.sorts == NULL)
+    if (c->stars.sorts_local == NULL && c->stars.sorts_foreign == NULL)
       error("Trying to activate un-existing c->stars.sorts");
 #endif
-    scheduler_activate(s, c->stars.sorts);
-    if (c->nodeID == engine_rank) {
+    if (c->stars.sorts_local) {
+      scheduler_activate(s, c->stars.sorts_local);
+    }
+    if (c->stars.sorts_foreign) {
+      scheduler_activate(s, c->stars.sorts_foreign);
+    }
+    if (c->stars.sorts_local) {
       // MATTHIEU: to do: do we actually need both drifts here?
       cell_activate_drift_part(c, s);
       cell_activate_drift_spart(c, s);
@@ -2021,11 +2026,17 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
       parent->stars.do_sub_sort = 1;
       if (parent == c->super) {
 #ifdef SWIFT_DEBUG_CHECKS
-        if (parent->stars.sorts == NULL)
+        if (parent->stars.sorts_local == NULL &&
+            parent->stars.sorts_foreign == NULL)
           error("Trying to activate un-existing parents->stars.sorts");
 #endif
-        scheduler_activate(s, parent->stars.sorts);
-        if (parent->nodeID == engine_rank) {
+        if (parent->stars.sorts_local) {
+          scheduler_activate(s, parent->stars.sorts_local);
+        }
+        if (parent->stars.sorts_foreign) {
+          scheduler_activate(s, parent->stars.sorts_foreign);
+        }
+        if (parent->stars.sorts_local) {
           cell_activate_drift_part(parent, s);
           cell_activate_drift_spart(parent, s);
         }
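The two hunks above replace the single stars sort task with a local/foreign pair, so activation no longer branches on the node ID: a cell simply activates whichever variant it carries. A minimal, self-contained sketch of that pattern (toy types and names, not SWIFT's real structs):

#include <stdio.h>

/* Toy stand-ins for SWIFT's task and cell types -- illustrative only. */
struct task { const char *name; int skip; };
struct cell { struct task *sorts_local; struct task *sorts_foreign; };

static void activate(struct task *t) {
  t->skip = 0; /* an unskipped task is eligible to run */
  printf("activated %s\n", t->name);
}

/* Mirrors cell_activate_stars_sorts_up: activate whichever sort variant
 * the cell carries instead of testing c->nodeID. */
static void activate_stars_sorts(struct cell *c) {
  if (c->sorts_local) activate(c->sorts_local);
  if (c->sorts_foreign) activate(c->sorts_foreign);
}

int main(void) {
  struct task local = {"stars_sort_local", 1};
  struct task foreign = {"stars_sort_foreign", 1};
  struct cell home = {&local, NULL};   /* cell owned by this rank */
  struct cell away = {NULL, &foreign}; /* proxy for a cell on another rank */
  activate_stars_sorts(&home);
  activate_stars_sorts(&away);
  return 0;
}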
@@ -2040,9 +2051,6 @@ void cell_activate_stars_sorts_up(struct cell *c, struct scheduler *s) {
  */
 void cell_activate_stars_sorts(struct cell *c, int sid, struct scheduler *s) {
 
-  // TODO Alexei, remove this
-  if (c->nodeID != engine_rank) return;
-
   /* Do we need to re-sort? */
   if (c->stars.dx_max_sort > space_maxreldx * c->dmin) {
@@ -3426,18 +3434,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
     /* Activate the send/recv tasks. */
     if (ci_nodeID != nodeID) {
 
-      // TODO Alexei: In this section, you will find some comments that
-      // are from the hydro code. It should look the same for the feedback.
+      /* If the local cell is active, receive data from the foreign cell. */
       if (cj_active) {
         scheduler_activate(s, ci->mpi.hydro.recv_xv);
-        /* if (ci_active) { */
-        /*   scheduler_activate(s, ci->mpi.hydro.recv_rho); */
-        /* } */
+        if (ci_active) {
+          scheduler_activate(s, ci->mpi.stars.recv);
+        }
       }
 
-      /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-      /* if (ci_active) scheduler_activate(s, ci->mpi.recv_ti); */
+      /* If the foreign cell is active, we want its ti_end values. */
+      if (ci_active) scheduler_activate(s, ci->mpi.recv_ti);
 
       /* Is the foreign cell active and will need stuff from us? */
       if (ci_active) {
@@ -3448,30 +3453,28 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
            particles will be drifted, only those that are needed. */
         cell_activate_drift_part(cj, s);
 
-        /* /\* If the local cell is also active, more stuff will be needed.
-         * *\/ */
-        /* if (cj_active) { */
-        /*   scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID); */
-        /* } */
+        /* If the local cell is also active, more stuff will be needed.
+         */
+        if (cj_active) {
+          scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
+        }
       }
 
-      /* /\* If the local cell is active, send its ti_end values. *\/ */
-      /* if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti,
-       * ci_nodeID); */
+      /* If the local cell is active, send its ti_end values. */
+      if (cj_active) scheduler_activate_send(s, cj->mpi.send_ti, ci_nodeID);
 
     } else if (cj_nodeID != nodeID) {
 
       /* If the local cell is active, receive data from the foreign cell. */
       if (ci_active) {
         scheduler_activate(s, cj->mpi.hydro.recv_xv);
-        /* if (cj_active) { */
-        /*   scheduler_activate(s, cj->mpi.hydro.recv_rho); */
-        /* } */
+        if (cj_active) {
+          scheduler_activate(s, cj->mpi.stars.recv);
+        }
       }
 
-      /* /\* If the foreign cell is active, we want its ti_end values. *\/ */
-      /* if (cj_active) scheduler_activate(s, cj->mpi.recv_ti); */
+      /* If the foreign cell is active, we want its ti_end values. */
+      if (cj_active) scheduler_activate(s, cj->mpi.recv_ti);
 
       /* Is the foreign cell active and will need stuff from us? */
       if (cj_active) {
@@ -3482,18 +3485,15 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
            particles will be drifted, only those that are needed. */
         cell_activate_drift_part(ci, s);
 
-        /* /\* If the local cell is also active, more stuff will be needed.
-         * *\/ */
-        /* if (ci_active) { */
-        /*   scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID); */
-        /* } */
+        /* If the local cell is also active, more stuff will be needed.
+         */
+        if (ci_active) {
+          scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
+        }
       }
 
-      /* /\* If the local cell is active, send its ti_end values. *\/ */
-      /* if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti,
-       * cj_nodeID); */
+      /* If the local cell is active, send its ti_end values. */
+      if (ci_active) scheduler_activate_send(s, ci->mpi.send_ti, cj_nodeID);
     }
 #endif
   }
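The unskipping logic above is deliberately symmetric in ci and cj: whichever cell is foreign, the local rank receives positions (plus spart data when both cells are active) and activates the mirror-image sends. A toy model of that activation table, with illustrative names only:

#include <stdio.h>

/* "local" is the cell on this rank, "foreign" the one owned elsewhere. */
static void unskip_star_comms(int local_active, int foreign_active) {
  /* If the local cell is active, receive data from the foreign cell. */
  if (local_active) {
    printf("activate recv_xv\n");
    if (foreign_active) printf("activate recv spart\n");
  }
  /* If the foreign cell is active, we want its ti_end values. */
  if (foreign_active) printf("activate recv_ti\n");

  /* The foreign cell is active and will need stuff from us. */
  if (foreign_active) {
    printf("drift local parts, activate send_xv\n");
    if (local_active) printf("activate send spart\n");
  }
  /* If the local cell is active, send its ti_end values. */
  if (local_active) printf("activate send_ti\n");
}

int main(void) {
  unskip_star_comms(1, 1); /* both active: full exchange in both directions */
  return 0;
}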
...
@@ -481,8 +481,11 @@ struct cell {
       /*! Linked list of the tasks computing this cell's star feedback. */
       struct link *feedback;
 
-      /*! The task computing this cell's sorts. */
-      struct task *sorts;
+      /*! The task computing this cell's sorts before the density. */
+      struct task *sorts_local;
+
+      /*! The task computing this cell's sorts before the feedback. */
+      struct task *sorts_foreign;
 
       /*! Max smoothing length in this cell. */
       double h_max;
@@ -580,11 +583,18 @@ struct cell {
     } grav;
 
     struct {
-      /* Task receiving gpart data. */
+      /* Task receiving spart data. */
       struct task *recv;
 
-      /* Linked list for sending gpart data. */
+      /* Linked list for sending spart data. */
       struct link *send;
+    } stars;
+
+    struct {
+      /* Task receiving limiter data. */
+      struct task *recv;
+
+      /* Linked list for sending limiter data. */
+      struct link *send;
     } limiter;
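A sketch of the shape of the new mpi.stars sub-structure (toy declarations; only the field names follow the diff). The recv member is a single task because a foreign cell has exactly one owning rank, while send is a linked list because a local cell may be mirrored as a proxy on several ranks, each needing its own send:

/* Simplified stand-in for the MPI sub-structures of struct cell. */
struct task;
struct link { struct task *t; struct link *next; };

struct cell_mpi {
  struct {
    /* One recv: spart data arrives from the single owning rank. */
    struct task *recv;
    /* Possibly many sends: one per rank importing this cell. */
    struct link *send;
  } stars;
};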
@@ -1014,25 +1024,6 @@ cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
           cj->dmin);
 }
 
-/**
- * @brief Have star particles in a pair of cells moved too much and require a
- * rebuild
- * ?
- *
- * @param ci The first #cell.
- * @param cj The second #cell.
- */
-__attribute__((always_inline)) INLINE static int
-cell_need_rebuild_for_stars_pair(const struct cell *ci, const struct cell *cj) {
-
-  /* Is the cut-off radius plus the max distance the parts in both cells have */
-  /* moved larger than the cell size ? */
-  /* Note ci->dmin == cj->dmin */
-  return (kernel_gamma * max(ci->stars.h_max, cj->stars.h_max) +
-              ci->stars.dx_max_part + cj->stars.dx_max_part >
-          cj->dmin);
-}
-
 /**
  * @brief Add a unique tag to a cell, mostly for MPI communications.
  *
...
@@ -210,11 +210,11 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
  * @param ci The sending #cell.
  * @param cj Dummy cell containing the nodeID of the receiving node.
  * @param t_xv The send_xv #task, if it has already been created.
- * @param t_rho The send_rho #task, if it has already been created.
+ * @param t_feed The send_feed #task, if it has already been created.
  */
 void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
                                 struct cell *cj, struct task *t_xv,
-                                struct task *t_rho) {
+                                struct task *t_feed) {
 
 #ifdef WITH_MPI
 
   struct link *l = NULL;
@@ -238,24 +238,17 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
       }
     }
 
-    // TODO Alexei: I guess that you can assume that if the send_xv exists,
-    // send_rho exists too
     if (t_xv == NULL) {
 
+      /* Make sure this cell is tagged. */
+      cell_ensure_tagged(ci);
+
       /* Already exists, just need to get it */
       if (hydro != NULL) {
-        // TODO Alexei: set t_feedback
         t_xv = hydro->t;
 
         /* This task does not exists, need to create it */
       } else {
-        // TODO Alexei: create task and do correct unlocks
-
-        /* Make sure this cell is tagged. */
-        cell_ensure_tagged(ci);
-
         /* Create the tasks and their dependencies? */
         t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv,
                                  ci->mpi.tag, 0, ci, cj);
@@ -263,20 +256,26 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
         /* Drift before you send */
         scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
       }
+
+      /* Create the tasks and their dependencies? */
+      t_feed = scheduler_addtask(s, task_type_send, task_subtype_spart,
+                                 ci->mpi.tag, 0, ci, cj);
+
+      /* Ghost before you send */
+      scheduler_addunlock(s, ci->super->stars.ghost_out, t_feed);
     }
 
     if (hydro == NULL) {
       engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
-      // TODO Alexei: addlink
-      /* engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho); */
     }
+    engine_addlink(e, &ci->mpi.stars.send, t_feed);
   }
 
   /* Recurse? */
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_rho);
+        engine_addtasks_send_stars(e, ci->progeny[k], cj, t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -316,6 +315,12 @@ void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
           (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
         break;
 
+  if (l == NULL)
+    for (l = ci->stars.density; l != NULL; l = l->next)
+      if (l->t->ci->nodeID == nodeID ||
+          (l->t->cj != NULL && l->t->cj->nodeID == nodeID))
+        break;
+
   /* If found anything, attach send tasks. */
   if (l != NULL) {
@@ -434,10 +439,10 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
  * @param e The #engine.
  * @param c The foreign #cell.
  * @param t_xv The recv_xv #task, if it has already been created.
- * @param t_rho The recv_rho #task, if it has already been created.
+ * @param t_feed The recv_feed #task, if it has already been created.
  */
 void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
-                                struct task *t_xv, struct task *t_rho) {
+                                struct task *t_xv, struct task *t_feed) {
 
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
@@ -456,40 +461,38 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
       new_task = 1;
       t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, c->mpi.tag,
                                0, c, NULL);
-      // TODO Alexei: create t_feedback task
-      /* t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho,
-       * c->mpi.tag, */
-      /*                           0, c, NULL); */
     } else {
-      // TODO Alexei: set t_feedback
      t_xv = c->mpi.hydro.recv_xv;
     }
+
+    t_feed = scheduler_addtask(s, task_type_recv, task_subtype_spart,
+                               c->mpi.tag, 0, c, NULL);
+
+    /* Need to sort task before feedback loop */
+    scheduler_addunlock(s, t_feed, c->super->stars.sorts_foreign);
   }
 
-  // TODO Alexei: set pointer
   c->mpi.hydro.recv_xv = t_xv;
-  /* c->mpi.hydro.recv_rho = t_rho; */
+  c->mpi.stars.recv = t_feed;
 
   /* Add dependencies. */
   if (c->hydro.sorts != NULL && new_task) {
     scheduler_addunlock(s, t_xv, c->hydro.sorts);
   }
-  // TODO Alexei: You will need to sort the particles after receiving the spart
   for (struct link *l = c->stars.density; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_xv, l->t);
-    // TODO Alexei: I guess that you will need to unlock the recv here
-    /* scheduler_addunlock(s, l->t, t_rho); */
+    scheduler_addunlock(s, l->t, t_feed);
  }
-  // TODO Alexei: unlock feedback task
-  /* for (struct link *l = c->hydro.force; l != NULL; l = l->next) */
-  /*   scheduler_addunlock(s, t_rho, l->t); */
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
+    scheduler_addunlock(s, t_feed, l->t);
+  }
 
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
       if (c->progeny[k] != NULL)
-        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_rho);
+        engine_addtasks_recv_stars(e, c->progeny[k], t_xv, t_feed);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -594,6 +597,9 @@ void engine_addtasks_recv_timestep(struct engine *e, struct cell *c,
     }
   }
 
+  for (struct link *l = c->stars.feedback; l != NULL; l = l->next)
+    scheduler_addunlock(s, l->t, t_ti);
+
   /* Recurse? */
   if (c->split)
     for (int k = 0; k < 8; k++)
@@ -931,13 +937,16 @@ void engine_make_hierarchical_tasks_stars(struct engine *e, struct cell *c) {
   /* Are we in a super-cell ? */
   if (c->super == c) {
 
+    /* Foreign tasks only */
+    if (c->nodeID != e->nodeID) {
+      c->stars.sorts_foreign = scheduler_addtask(
+          s, task_type_stars_sort_foreign, task_subtype_none, 0, 0, c, NULL);
+    }
+
     /* Local tasks only... */
     if (c->nodeID == e->nodeID) {
-      // TODO Alexei: do not need to be only on local node with feedback
-      /* Add the sort task. */
-      c->stars.sorts = scheduler_addtask(s, task_type_stars_sort,
-                                         task_subtype_none, 0, 0, c, NULL);
+      c->stars.sorts_local = scheduler_addtask(
+          s, task_type_stars_sort_local, task_subtype_none, 0, 0, c, NULL);
 
       /* Generate the ghost tasks. */
       c->stars.ghost_in =
@@ -1202,11 +1211,19 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
   }
 
   /* Link stars sort tasks to all the higher sort task. */
-  if (t_type == task_type_stars_sort) {
+  if (t_type == task_type_stars_sort_local) {
     for (struct cell *finger = t->ci->parent; finger != NULL;
-         finger = finger->parent)
-      if (finger->stars.sorts != NULL)
-        scheduler_addunlock(sched, t, finger->stars.sorts);
+         finger = finger->parent) {
+      if (finger->stars.sorts_local != NULL)
+        scheduler_addunlock(sched, t, finger->stars.sorts_local);
+    }
+  }
+
+  if (t_type == task_type_stars_sort_foreign) {
+    for (struct cell *finger = t->ci->parent; finger != NULL;
+         finger = finger->parent) {
+      if (finger->stars.sorts_foreign != NULL)
+        scheduler_addunlock(sched, t, finger->stars.sorts_foreign);
+    }
   }
 
   /* Link self tasks to cells. */
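Both branches above walk the parent chain the same way: a sort task unlocks the matching sort of every ancestor that carries one. A self-contained toy of that walk:

#include <stdio.h>
#include <stddef.h>

struct cell { const char *name; struct cell *parent; int has_sort; };

/* Toy version of the parent-walk: link this cell's sort to every
 * ancestor sort of the same (local or foreign) kind. */
static void link_sorts_up(struct cell *c) {
  for (struct cell *finger = c->parent; finger != NULL;
       finger = finger->parent) {
    if (finger->has_sort)
      printf("sort(%s) unlocks sort(%s)\n", c->name, finger->name);
  }
}

int main(void) {
  struct cell top = {"top", NULL, 1};
  struct cell mid = {"mid", &top, 0};   /* no sort task at this level */
  struct cell leaf = {"leaf", &mid, 1};
  link_sorts_up(&leaf); /* prints: sort(leaf) unlocks sort(top) */
  return 0;
}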
@@ -1959,7 +1976,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
     struct task *t = &((struct task *)map_data)[ind];
 
     /* Sort tasks depend on the drift and gravity drift of the cell. */
-    if (t->type == task_type_stars_sort && t->ci->nodeID == engine_rank) {
+    if (t->type == task_type_stars_sort_local) {
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
     }
@@ -1968,9 +1985,6 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
     else if (t->type == task_type_self &&
              t->subtype == task_subtype_stars_density) {
 
-      /* Make the self-density tasks depend on the drifts. */
-      scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
-
       /* Make the self-density tasks depend on the drift and gravity drift. */
       scheduler_addunlock(sched, t->ci->hydro.super->hydro.drift, t);
       scheduler_addunlock(sched, t->ci->super->grav.drift, t);
@@ -2002,9 +2016,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->nodeID == engine_rank) {
         scheduler_addunlock(sched, t->ci->super->grav.drift, t);
-        // TODO Alexei: the stars in foreign cells need to be sorted before
-        // the feedback loop and after the ghosts
-        scheduler_addunlock(sched, t->ci->super->stars.sorts, t);
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_local, t);
       }
 
       if (t->ci->hydro.super != t->cj->hydro.super) {
@@ -2016,8 +2028,7 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
       if (t->ci->super != t->cj->super) {
         if (t->cj->nodeID == engine_rank) {
           scheduler_addunlock(sched, t->cj->super->grav.drift, t);
-          // TODO Alexei: same here, sort before feedback
-          scheduler_addunlock(sched, t->cj->super->stars.sorts, t);
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_local, t);
         }
       }
@@ -2026,6 +2037,16 @@ void engine_make_extra_starsloop_tasks_mapper(void *map_data, int num_elements,
           scheduler_addtask(sched, task_type_pair, task_subtype_stars_feedback,
                             0, 0, t->ci, t->cj);
 
+      /* Add sort before feedback loop */
+      if (t->ci->nodeID != engine_rank) {
+        scheduler_addunlock(sched, t->ci->super->stars.sorts_foreign, t2);
+      }
+      if (t->ci->super != t->cj->super) {
+        if (t->cj->nodeID != engine_rank) {
+          scheduler_addunlock(sched, t->cj->super->stars.sorts_foreign, t2);
+        }
+      }
+
       /* Add the link between the new loop and both cells */
       engine_addlink(e, &t->ci->stars.feedback, t2);
       engine_addlink(e, &t->cj->stars.feedback, t2);
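The pair feedback tasks finally gain a dependency on the foreign sorts, so sparts received over MPI are re-sorted before any feedback interaction reads them. A toy of that wiring, following the same conventions as the sketches above:

#include <stdio.h>

static void addunlock(const char *a, const char *b) {
  printf("%s -> %s\n", a, b);
}

/* Mirrors the new block: only foreign cells need the foreign sort, and cj
 * is only handled when it lives under a different super-cell than ci. */
static void wire_pair_feedback(int ci_foreign, int cj_foreign, int same_super) {
  if (ci_foreign) addunlock("ci.super.sorts_foreign", "pair_feedback");
  if (!same_super && cj_foreign)
    addunlock("cj.super.sorts_foreign", "pair_feedback");
}

int main(void) {
  wire_pair_feedback(1, 0, 0); /* ci foreign, cj local */
  return 0;
}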