Commit e21885b1 authored by Peter W. Draper's avatar Peter W. Draper
Browse files

Merge branch 'collapse_send_recv_tasks' into 'master'

Collapse send recv tasks

See merge request !790
parents 366725a2 b94ac186
......@@ -326,7 +326,8 @@ int cell_link_foreign_parts(struct cell *c, struct part *parts) {
#endif
/* Do we have a hydro task at this level? */
if (c->mpi.hydro.recv_xv != NULL) {
if (cell_get_recv(c, task_subtype_xv) != NULL) {
/* Recursively attach the parts */
const int counts = cell_link_parts(c, parts);
#ifdef SWIFT_DEBUG_CHECKS
......@@ -372,8 +373,9 @@ int cell_link_foreign_gparts(struct cell *c, struct gpart *gparts) {
error("Linking foreign particles in a local cell!");
#endif
/* Do we have a hydro task at this level? */
if (c->mpi.grav.recv != NULL) {
/* Do we have a gravity task at this level? */
if (cell_get_recv(c, task_subtype_gpart) != NULL) {
/* Recursively attach the gparts */
const int counts = cell_link_gparts(c, gparts);
#ifdef SWIFT_DEBUG_CHECKS
......@@ -418,7 +420,7 @@ int cell_count_parts_for_tasks(const struct cell *c) {
#endif
/* Do we have a hydro task at this level? */
if (c->mpi.hydro.recv_xv != NULL) {
if (cell_get_recv(c, task_subtype_xv) != NULL) {
return c->hydro.count;
}
......@@ -455,8 +457,8 @@ int cell_count_gparts_for_tasks(const struct cell *c) {
error("Counting foreign particles in a local cell!");
#endif
/* Do we have a hydro task at this level? */
if (c->mpi.grav.recv != NULL) {
/* Do we have a gravity task at this level? */
if (cell_get_recv(c, task_subtype_gpart) != NULL) {
return c->grav.count;
}
......@@ -2884,27 +2886,30 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
if (ci_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
if (cj_active) {
scheduler_activate(s, ci->mpi.hydro.recv_xv);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_xv);
if (ci_active) {
scheduler_activate(s, ci->mpi.hydro.recv_rho);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_rho);
#ifdef EXTRA_HYDRO_LOOP
scheduler_activate(s, ci->mpi.hydro.recv_gradient);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_gradient);
#endif
}
}
/* If the foreign cell is active, we want its ti_end values. */
if (ci_active || with_limiter)
scheduler_activate(s, ci->mpi.hydro.recv_ti);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_tend_part);
if (with_limiter) scheduler_activate(s, ci->mpi.limiter.recv);
if (with_limiter)
scheduler_activate_send(s, cj->mpi.limiter.send, ci->nodeID);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_limiter);
if (with_limiter)
scheduler_activate_send(s, cj->mpi.send, task_subtype_limiter,
ci->nodeID);
/* Is the foreign cell active and will need stuff from us? */
if (ci_active) {
scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_xv, ci_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
......@@ -2913,42 +2918,48 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
/* If the local cell is also active, more stuff will be needed. */
if (cj_active) {
scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_rho,
ci_nodeID);
#ifdef EXTRA_HYDRO_LOOP
scheduler_activate_send(s, cj->mpi.hydro.send_gradient, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_gradient,
ci_nodeID);
#endif
}
}
/* If the local cell is active, send its ti_end values. */
if (cj_active || with_limiter)
scheduler_activate_send(s, cj->mpi.hydro.send_ti, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_tend_part,
ci_nodeID);
} else if (cj_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active) {
scheduler_activate(s, cj->mpi.hydro.recv_xv);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_xv);
if (cj_active) {
scheduler_activate(s, cj->mpi.hydro.recv_rho);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_rho);
#ifdef EXTRA_HYDRO_LOOP
scheduler_activate(s, cj->mpi.hydro.recv_gradient);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_gradient);
#endif
}
}
/* If the foreign cell is active, we want its ti_end values. */
if (cj_active || with_limiter)
scheduler_activate(s, cj->mpi.hydro.recv_ti);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_tend_part);
if (with_limiter) scheduler_activate(s, cj->mpi.limiter.recv);
if (with_limiter)
scheduler_activate_send(s, ci->mpi.limiter.send, cj->nodeID);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_limiter);
if (with_limiter)
scheduler_activate_send(s, ci->mpi.send, task_subtype_limiter,
cj->nodeID);
/* Is the foreign cell active and will need stuff from us? */
if (cj_active) {
scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_xv, cj_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
......@@ -2957,17 +2968,21 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
/* If the local cell is also active, more stuff will be needed. */
if (ci_active) {
scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_rho,
cj_nodeID);
#ifdef EXTRA_HYDRO_LOOP
scheduler_activate_send(s, ci->mpi.hydro.send_gradient, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_gradient,
cj_nodeID);
#endif
}
}
/* If the local cell is active, send its ti_end values. */
if (ci_active || with_limiter)
scheduler_activate_send(s, ci->mpi.hydro.send_ti, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_tend_part,
cj_nodeID);
}
#endif
}
......@@ -3057,14 +3072,18 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
/* Activate the send/recv tasks. */
if (ci_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
if (cj_active) scheduler_activate(s, ci->mpi.grav.recv);
if (cj_active)
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_gpart);
/* If the foreign cell is active, we want its ti_end values. */
if (ci_active) scheduler_activate(s, ci->mpi.grav.recv_ti);
if (ci_active)
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_tend_gpart);
/* Is the foreign cell active and will need stuff from us? */
if (ci_active) {
scheduler_activate_send(s, cj->mpi.grav.send, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_gpart,
ci_nodeID);
/* Drift the cell which will be sent at the level at which it is
sent, i.e. drift the cell specified in the send task (l->t)
......@@ -3074,18 +3093,23 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
/* If the local cell is active, send its ti_end values. */
if (cj_active)
scheduler_activate_send(s, cj->mpi.grav.send_ti, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_tend_gpart,
ci_nodeID);
} else if (cj_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active) scheduler_activate(s, cj->mpi.grav.recv);
if (ci_active)
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_gpart);
/* If the foreign cell is active, we want its ti_end values. */
if (cj_active) scheduler_activate(s, cj->mpi.grav.recv_ti);
if (cj_active)
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_tend_gpart);
/* Is the foreign cell active and will need stuff from us? */
if (cj_active) {
scheduler_activate_send(s, ci->mpi.grav.send, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_gpart,
cj_nodeID);
/* Drift the cell which will be sent at the level at which it is
sent, i.e. drift the cell specified in the send task (l->t)
......@@ -3095,7 +3119,8 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
/* If the local cell is active, send its ti_end values. */
if (ci_active)
scheduler_activate_send(s, ci->mpi.grav.send_ti, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_tend_gpart,
cj_nodeID);
}
#endif
}
......@@ -3251,26 +3276,28 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Activate the send/recv tasks. */
if (ci_nodeID != nodeID) {
if (cj_active) {
scheduler_activate(s, ci->mpi.hydro.recv_xv);
scheduler_activate(s, ci->mpi.hydro.recv_rho);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_xv);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_rho);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_spart,
ci_nodeID);
cell_activate_drift_spart(cj, s);
/* If the local cell is active, send its ti_end values. */
scheduler_activate_send(s, cj->mpi.stars.send_ti, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_tend_spart,
ci_nodeID);
}
if (ci_active) {
scheduler_activate(s, ci->mpi.stars.recv);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_spart);
/* If the foreign cell is active, we want its ti_end values. */
scheduler_activate(s, ci->mpi.stars.recv_ti);
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_tend_spart);
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_xv, ci_nodeID);
scheduler_activate_send(s, cj->mpi.send, task_subtype_rho, ci_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
......@@ -3280,26 +3307,28 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
} else if (cj_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active) {
scheduler_activate(s, cj->mpi.hydro.recv_xv);
scheduler_activate(s, cj->mpi.hydro.recv_rho);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_xv);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_rho);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_spart,
cj_nodeID);
cell_activate_drift_spart(ci, s);
/* If the local cell is active, send its ti_end values. */
scheduler_activate_send(s, ci->mpi.stars.send_ti, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_tend_spart,
cj_nodeID);
}
if (cj_active) {
scheduler_activate(s, cj->mpi.stars.recv);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_spart);
/* If the foreign cell is active, we want its ti_end values. */
scheduler_activate(s, cj->mpi.stars.recv_ti);
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_tend_spart);
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_xv, cj_nodeID);
scheduler_activate_send(s, ci->mpi.send, task_subtype_rho, cj_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
......@@ -3476,9 +3505,7 @@ void cell_set_super_mapper(void *map_data, int num_elements, void *extra_data) {
*/
int cell_has_tasks(struct cell *c) {
#ifdef WITH_MPI
if (c->timestep != NULL || c->mpi.hydro.recv_ti != NULL ||
c->mpi.grav.recv_ti != NULL || c->mpi.stars.recv_ti != NULL)
return 1;
if (c->timestep != NULL || c->mpi.recv != NULL) return 1;
#else
if (c->timestep != NULL) return 1;
#endif
......
......@@ -653,71 +653,13 @@ struct cell {
/*! MPI variables */
struct {
struct {
/* Task receiving hydro data (positions). */
struct task *recv_xv;
/* Task receiving hydro data (density). */
struct task *recv_rho;
/* Task receiving hydro data (gradient). */
struct task *recv_gradient;
/* Task receiving data (time-step). */
struct task *recv_ti;
/* Linked list for sending hydro data (positions). */
struct link *send_xv;
/* Linked list for sending hydro data (density). */
struct link *send_rho;
/* Linked list for sending hydro data (gradient). */
struct link *send_gradient;
/* Linked list for sending data (time-step). */
struct link *send_ti;
} hydro;
struct {
/* Task receiving gpart data. */
struct task *recv;
/* Task receiving data (time-step). */
struct task *recv_ti;
/* Linked list for sending gpart data. */
union {
/* Single list of all send tasks associated with this cell. */
struct link *send;
/* Linked list for sending data (time-step). */
struct link *send_ti;
} grav;
struct {
/* Task receiving spart data. */
struct task *recv;
/* Task receiving data (time-step). */
struct task *recv_ti;
/* Linked list for sending spart data. */
struct link *send;
/* Linked list for sending data (time-step). */
struct link *send_ti;
} stars;
struct {
/* Task receiving limiter data. */
struct task *recv;
/* Linked list for sending limiter data. */
struct link *send;
} limiter;
/* Single list of all recv tasks associated with this cell. */
struct link *recv;
};
/*! Bit mask of the proxies this cell is registered with. */
unsigned long long int sendto;
......@@ -1250,4 +1192,18 @@ __attribute__((always_inline)) INLINE static int cell_get_flag(
return (c->flags & flag) > 0;
}
/**
 * @brief Get the recv task of a given subtype attached to a cell, if any.
 *
 * Walks the cell's single linked list of recv tasks looking for the first
 * entry whose task matches the requested subtype.
 *
 * @param c The #cell whose recv list is searched.
 * @param subtype The task subtype to look for (e.g. task_subtype_xv).
 * @return The matching recv task, or NULL if none is attached (always NULL
 *         when compiled without MPI support).
 */
__attribute__((always_inline)) INLINE static struct task *cell_get_recv(
    const struct cell *c, enum task_subtypes subtype) {
#ifdef WITH_MPI
  /* Scan the flat recv list for the first task of the requested subtype. */
  for (struct link *l = c->mpi.recv; l != NULL; l = l->next) {
    if (l->t->subtype == subtype) return l->t;
  }
  return NULL;
#else
  /* No MPI: cells never carry recv tasks. */
  return NULL;
#endif
}
#endif /* SWIFT_CELL_H */
......@@ -405,7 +405,7 @@ static void dumpCells_map(struct cell *c, void *data) {
* These define the edges of the partitions. */
int ismpiactive = 0;
#if WITH_MPI
ismpiactive = (c->mpi.hydro.send_xv != NULL);
ismpiactive = (c->mpi.send != NULL);
if (mpiactive)
mpiactive = ismpiactive;
else
......
......@@ -156,6 +156,12 @@ struct end_of_step_data {
*/
void engine_addlink(struct engine *e, struct link **l, struct task *t) {
#ifdef SWIFT_DEBUG_CHECKS
if (t == NULL) {
error("Trying to link NULL task.");
}
#endif
/* Get the next free link. */
const size_t ind = atomic_inc(&e->nr_links);
if (ind >= e->size_links) {
......@@ -2676,11 +2682,10 @@ void engine_barrier(struct engine *e) {
void engine_collect_end_of_step_recurse_hydro(struct cell *c,
const struct engine *e) {
/* Skip super-cells (Their values are already set) */
#ifdef WITH_MPI
if (c->timestep != NULL || c->mpi.hydro.recv_ti != NULL) return;
#else
/* Skip super-cells (Their values are already set) */
if (c->timestep != NULL) return;
#ifdef WITH_MPI
if (cell_get_recv(c, task_subtype_tend_part) != NULL) return;
#endif /* WITH_MPI */
#ifdef SWIFT_DEBUG_CHECKS
......@@ -2753,11 +2758,10 @@ void engine_collect_end_of_step_recurse_hydro(struct cell *c,
void engine_collect_end_of_step_recurse_grav(struct cell *c,
const struct engine *e) {
/* Skip super-cells (Their values are already set) */
#ifdef WITH_MPI
if (c->timestep != NULL || c->mpi.grav.recv_ti != NULL) return;
#else
/* Skip super-cells (Their values are already set) */
if (c->timestep != NULL) return;
#ifdef WITH_MPI
if (cell_get_recv(c, task_subtype_tend_gpart) != NULL) return;
#endif /* WITH_MPI */
#ifdef SWIFT_DEBUG_CHECKS
......@@ -2812,11 +2816,10 @@ void engine_collect_end_of_step_recurse_grav(struct cell *c,
void engine_collect_end_of_step_recurse_stars(struct cell *c,
const struct engine *e) {
/* Skip super-cells (Their values are already set) */
#ifdef WITH_MPI
if (c->timestep != NULL || c->mpi.stars.recv_ti != NULL) return;
#else
/* Skip super-cells (Their values are already set) */
if (c->timestep != NULL) return;
#ifdef WITH_MPI
if (cell_get_recv(c, task_subtype_tend_spart) != NULL) return;
#endif /* WITH_MPI */
#ifdef SWIFT_DEBUG_CHECKS
......
......@@ -104,8 +104,8 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
}
/* Add them to the local cell. */
engine_addlink(e, &ci->mpi.grav.send, t_grav);
engine_addlink(e, &ci->mpi.grav.send_ti, t_ti);
engine_addlink(e, &ci->mpi.send, t_grav);
engine_addlink(e, &ci->mpi.send, t_ti);
}
/* Recurse? */
......@@ -205,12 +205,12 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
}
/* Add them to the local cell. */
engine_addlink(e, &ci->mpi.hydro.send_xv, t_xv);
engine_addlink(e, &ci->mpi.hydro.send_rho, t_rho);
engine_addlink(e, &ci->mpi.send, t_xv);
engine_addlink(e, &ci->mpi.send, t_rho);
#ifdef EXTRA_HYDRO_LOOP
engine_addlink(e, &ci->mpi.hydro.send_gradient, t_gradient);
engine_addlink(e, &ci->mpi.send, t_gradient);
#endif
engine_addlink(e, &ci->mpi.hydro.send_ti, t_ti);
engine_addlink(e, &ci->mpi.send, t_ti);
}
/* Recurse? */
......@@ -277,8 +277,8 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
scheduler_addunlock(s, ci->super->timestep, t_ti);
}
engine_addlink(e, &ci->mpi.stars.send, t_feedback);
engine_addlink(e, &ci->mpi.stars.send_ti, t_ti);
engine_addlink(e, &ci->mpi.send, t_feedback);
engine_addlink(e, &ci->mpi.send, t_ti);
}
/* Recurse? */
......@@ -331,40 +331,45 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
c->mpi.tag, 0, c, NULL);
}
c->mpi.hydro.recv_xv = t_xv;
c->mpi.hydro.recv_rho = t_rho;
c->mpi.hydro.recv_gradient = t_gradient;
c->mpi.hydro.recv_ti = t_ti;
if (t_xv != NULL) {
engine_addlink(e, &c->mpi.recv, t_xv);
engine_addlink(e, &c->mpi.recv, t_rho);
#ifdef EXTRA_HYDRO_LOOP
engine_addlink(e, &c->mpi.recv, t_gradient);
#endif
engine_addlink(e, &c->mpi.recv, t_ti);
/* Add dependencies. */
if (c->hydro.sorts != NULL) {
scheduler_addunlock(s, t_xv, c->hydro.sorts);
scheduler_addunlock(s, c->hydro.sorts, t_rho);
}
/* Add dependencies. */
if (c->hydro.sorts != NULL) {
scheduler_addunlock(s, t_xv, c->hydro.sorts);
scheduler_addunlock(s, c->hydro.sorts, t_rho);
}
for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_xv, l->t);
scheduler_addunlock(s, l->t, t_rho);
}
for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_xv, l->t);
scheduler_addunlock(s, l->t, t_rho);
}
#ifdef EXTRA_HYDRO_LOOP
for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
scheduler_addunlock(s, l->t, t_gradient);
}
for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
scheduler_addunlock(s, t_gradient, l->t);
scheduler_addunlock(s, l->t, t_ti);
}
for (struct link *l = c->hydro.gradient; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
scheduler_addunlock(s, l->t, t_gradient);
}
for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
scheduler_addunlock(s, t_gradient, l->t);
scheduler_addunlock(s, l->t, t_ti);
}
#else
for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
scheduler_addunlock(s, l->t, t_ti);
}
for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
scheduler_addunlock(s, l->t, t_ti);
}
#endif
/* Make sure the density has been computed before the stars compute theirs. */
for (struct link *l = c->stars.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
/* Make sure the density has been computed before the stars compute theirs.
*/
for (struct link *l = c->stars.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_rho, l->t);
}
}
/* Recurse? */
......@@ -409,22 +414,24 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
c->mpi.tag, 0, c, NULL);
}
c->mpi.stars.recv = t_feedback;
c->mpi.stars.recv_ti = t_ti;
if (t_feedback != NULL) {
engine_addlink(e, &c->mpi.recv, t_feedback);
engine_addlink(e, &c->mpi.recv, t_ti);
#ifdef SWIFT_DEBUG_CHECKS
if (c->nodeID == e->nodeID) error("Local cell!");
if (c->nodeID == e->nodeID) error("Local cell!");
#endif
if (c->stars.sorts != NULL)
scheduler_addunlock(s, t_feedback, c->stars.sorts);
if (c->stars.sorts != NULL)
scheduler_addunlock(s, t_feedback, c->stars.sorts);
for (struct link *l = c->stars.density; l != NULL; l = l->next) {
scheduler_addunlock(s, l->t, t_feedback);
}
for (struct link *l = c->stars.density; l != NULL; l = l->next) {
scheduler_addunlock(s, l->t, t_feedback);
}
for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
scheduler_addunlock(s, t_feedback, l->t);
scheduler_addunlock(s, l->t, t_ti);
for (struct link *l = c->stars.feedback; l != NULL; l = l->next) {
scheduler_addunlock(s, t_feedback, l->t);
scheduler_addunlock(s, l->t, t_ti);