Commit fedf9b45 authored by Matthieu Schaller
Browse files

Unskip the send/recv of gparts and multipoles.

parent f4fa72c0
......@@ -2061,10 +2061,12 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
struct task *t = l->t;
struct cell *ci = t->ci;
struct cell *cj = t->cj;
const int ci_active = cell_is_active(ci, e);
const int cj_active = (cj != NULL) ? cell_is_active(cj, e) : 0;
/* Only activate tasks that involve a local active cell. */
if ((cell_is_active(ci, e) && ci->nodeID == engine_rank) ||
(cj != NULL && cell_is_active(cj, e) && cj->nodeID == engine_rank)) {
if ((ci_active && ci->nodeID == engine_rank) ||
(cj_active && cj->nodeID == engine_rank)) {
scheduler_activate(s, t);
/* Set the drifting flags */
......@@ -2075,6 +2077,65 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
cell_activate_subcell_grav_tasks(t->ci, NULL, s);
} else if (t->type == task_type_pair) {
cell_activate_subcell_grav_tasks(t->ci, t->cj, s);
#ifdef WITH_MPI
/* Activate the send/recv tasks. */
if (ci->nodeID != engine_rank) {
/* If the local cell is active, receive data from the foreign cell. */
if (cj_active) {
scheduler_activate(s, ci->recv_grav);
scheduler_activate(s, ci->recv_multipole);
}
/* If the foreign cell is active, we want its ti_end values. */
if (ci_active) scheduler_activate(s, ci->recv_ti);
/* Is the foreign cell active and will need stuff from us? */
if (ci_active) {
scheduler_activate_send(s, cj->send_grav, ci->nodeID);
/* Drift the cell which will be sent at the level at which it is
sent, i.e. drift the cell specified in the send task (l->t)
itself. */
cell_activate_drift_gpart(cj, s);
scheduler_activate_send(s, cj->send_multipole, ci->nodeID);
}
/* If the local cell is active, send its ti_end values. */
if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
} else if (cj->nodeID != engine_rank) {
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active) {
scheduler_activate(s, cj->recv_grav);
scheduler_activate(s, cj->recv_multipole);
}
/* If the foreign cell is active, we want its ti_end values. */
if (cj_active) scheduler_activate(s, cj->recv_ti);
/* Is the foreign cell active and will need stuff from us? */
if (cj_active) {
scheduler_activate_send(s, ci->send_grav, cj->nodeID);
/* Drift the cell which will be sent at the level at which it is
sent, i.e. drift the cell specified in the send task (l->t)
itself. */
cell_activate_drift_gpart(ci, s);
scheduler_activate_send(s, ci->send_multipole, cj->nodeID);
}
/* If the local cell is active, send its ti_end values. */
if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
}
#endif
}
}
}
......
......@@ -114,6 +114,6 @@
//#define SOURCETERMS_SN_FEEDBACK
//#define ICHECK 5726454604296ll
#define ICHECK 5268994168350ll
//#define ICHECK 6745760614196ll
#endif /* SWIFT_CONST_H */
......@@ -1168,7 +1168,7 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci, struct cell *
* @param t_multi The send_multi #task, if it has already been created.
*/
void engine_addtasks_send_gravity(struct engine *e, struct cell *ci, struct cell *cj,
struct task *t_grav, struct task *t_multi ) {
struct task *t_grav, struct task *t_multi, struct task *t_ti) {
#ifdef WITH_MPI
struct link *l = NULL;
......@@ -1193,24 +1193,32 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *ci, struct cell
t_multi = scheduler_addtask(s, task_type_send, task_subtype_multipole, 6 * ci->tag + 5,
0, ci, cj);
t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
6 * ci->tag + 2, 0, ci, cj);
/* The sends should unlock the down pass. */
scheduler_addunlock(s, t_multi, ci->super->grav_down);
scheduler_addunlock(s, t_grav, ci->super->grav_down);
/* Drift before you send */
scheduler_addunlock(s, ci->super->drift_gpart, t_grav);
scheduler_addunlock(s, ci->super->init_grav, t_multi);
/* The super-cell's timestep task should unlock the send_ti task. */
scheduler_addunlock(s, ci->super->timestep, t_ti);
}
/* Add them to the local cell. */
engine_addlink(e, &ci->send_grav, t_grav);
engine_addlink(e, &ci->send_multipole, t_multi);
engine_addlink(e, &ci->send_ti, t_ti);
}
/* Recurse? */
if (ci->split)
for (int k = 0; k < 8; k++)
if (ci->progeny[k] != NULL)
engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi);
engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi, t_ti);
#else
error("SWIFT was not compiled with MPI support.");
......@@ -1301,7 +1309,7 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c, struct task *t
* @param t_grav The recv_gpart #task, if it has already been created.
* @param t_multi The recv_multipole #task, if it has already been created.
*/
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task *t_grav, struct task *t_multi) {
void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task *t_grav, struct task *t_multi, struct task *t_ti) {
#ifdef WITH_MPI
struct scheduler *s = &e->sched;
......@@ -1314,21 +1322,26 @@ void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task
c, NULL);
t_multi = scheduler_addtask(s, task_type_recv, task_subtype_multipole, 6 * c->tag + 5, 0,
c, NULL);
t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend,
6 * c->tag + 2, 0, c, NULL);
}
c->recv_grav = t_grav;
c->recv_multipole = t_multi;
c->recv_ti = t_ti;
for (struct link *l = c->grav; l != NULL; l = l->next) {
scheduler_addunlock(s, t_grav, l->t);
scheduler_addunlock(s, t_multi, l->t);
scheduler_addunlock(s, l->t, t_ti);
}
/* Recurse? */
if (c->split)
for (int k = 0; k < 8; k++)
if (c->progeny[k] != NULL)
engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_multi);
engine_addtasks_recv_gravity(e, c->progeny[k], t_grav, t_multi, t_ti);
#else
error("SWIFT was not compiled with MPI support.");
......@@ -2788,7 +2801,7 @@ void engine_maketasks(struct engine *e) {
if(e->policy & engine_policy_self_gravity)
for (int k = 0; k < p->nr_cells_in; k++)
engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL);
engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL, NULL);
/* Loop through the proxy's outgoing cells and add the
send tasks. */
......@@ -2799,7 +2812,7 @@ void engine_maketasks(struct engine *e) {
if(e->policy & engine_policy_self_gravity)
for (int k = 0; k < p->nr_cells_out; k++)
engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL, NULL);
engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL, NULL, NULL);
}
}
#endif
......@@ -3024,6 +3037,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate(s, ci->recv_multipole);
}
/* If the foreign cell is active, we want its ti_end values. */
if (ci_active) scheduler_activate(s, ci->recv_ti);
/* Is the foreign cell active and will need stuff from us? */
if (ci_active) {
......@@ -3037,6 +3053,10 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate_send(s, cj->send_multipole, ci->nodeID);
}
/* If the local cell is active, send its ti_end values. */
if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
} else if (cj->nodeID != engine_rank) {
/* If the local cell is active, receive data from the foreign cell. */
......@@ -3045,6 +3065,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate(s, cj->recv_multipole);
}
/* If the foreign cell is active, we want its ti_end values. */
if (cj_active) scheduler_activate(s, cj->recv_ti);
/* Is the foreign cell active and will need stuff from us? */
if (cj_active) {
......@@ -3059,6 +3082,9 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate_send(s, ci->send_multipole, cj->nodeID);
}
/* If the local cell is active, send its ti_end values. */
if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
}
#endif
}
......
......@@ -232,7 +232,7 @@ INLINE static void gravity_drift(struct gravity_tensors *m, double dt,
m->CoM[2] += dz;
/* Conservative change in maximal radius containing all gpart */
m->r_max = m->r_max_rebuild + x_diff;
m->r_max = m->r_max_rebuild + 0.*x_diff;
}
/**
......
......@@ -1422,8 +1422,9 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
#if (ICHECK != 0)
for(int i=0; i < c->gcount; ++i)
if(c->gparts[i].id_or_neg_offset == ICHECK)
message("Found gpart");
if(c->gparts[i].id_or_neg_offset == ICHECK) {
message("Found gpart"); fflush(stdout);
}
#endif
......@@ -1484,7 +1485,7 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
/* Check that this gpart has interacted with all the other
* particles (via direct or multipoles) in the box */
if (gp->num_interacted != e->total_nr_gparts /*&& gp->id_or_neg_offset == ICHECK*/)
if (gp->num_interacted != e->total_nr_gparts && gp->id_or_neg_offset == ICHECK)
error(
"g-particle (id=%lld, type=%s) did not interact "
"gravitationally "
......
/*******************************************************************************
* This file is part of SWIFT.
* Copyright (c) 2013 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
......@@ -135,6 +136,7 @@ void runner_do_grav_down(struct runner *r, struct cell *c, int timer) {
void runner_dopair_grav_mm(const struct runner *r, struct cell *restrict ci,
struct cell *restrict cj) {
/* Some constants */
const struct engine *e = r->e;
const struct space *s = e->s;
......@@ -1237,14 +1239,15 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
(abs(j-jj) <= 1 || abs(j-jj - cdim[1]) <= 1 || abs(j-jj + cdim[1]) <= 1) &&
(abs(k-kk) <= 1 || abs(k-kk - cdim[2]) <= 1 || abs(k-kk + cdim[2]) <= 1)) {
#if (ICHECK != 0)
if(check) {
++direct_ngbs;
direct_ngbs_gpart += cj->multipole->m_pole.num_gpart;
/* message("Found direct neighbour %d: (i,j,k)=(%d,%d,%d) (ii,jj,kk)=(%d,%d,%d) nodeID=%d", */
/* direct_ngbs, i,j,k, ii,jj,kk, cj->nodeID); */
}
#endif
/* #if (ICHECK != 0) */
/* if(check) { */
/* ++direct_ngbs; */
/* direct_ngbs_gpart += cj->multipole->m_pole.num_gpart; */
/* message("Found direct neighbour %d: (i,j,k)=(%d,%d,%d) (ii,jj,kk)=(%d,%d,%d) nodeID=%d", */
/* direct_ngbs, i,j,k, ii,jj,kk, cj->nodeID); */
/* } */
/* #endif */
}else{
......@@ -1287,11 +1290,6 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
}
} /* We are in charge of this pair */
} /* Loop over top-level cells */
if(check)
message("Interacted with %d indirectly and ignored %d direct interactions (counter=%lld) nr_cells=%d",
other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells);
#ifdef SWIFT_DEBUG_CHECKS
......@@ -1300,6 +1298,10 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
error("Not found the right number of particles in top-level interactions");
#endif
if(check)
message("Interacted with %d indirectly and ignored %d direct interactions (counter=%lld) nr_cells=%d total=%lld",
other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells, e->total_nr_gparts);
if (timer) TIMER_TOC(timer_dograv_long_range);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment