Commit dcad7a2b authored by Matthieu Schaller

Added a force communication task, with a dependency on the drift, to make sure the data is correctly drifted before being received in the star loop.
parent 67d40b16
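In task-graph terms, the commit adds one more hydro communication pair (send_force / recv_force) and orders it between the drift and the stars loop. The toy program below just lists the new ordering constraints so they can be read in one place; the struct and main() are illustrative scaffolding, and only the task names are taken from the diff.

```c
#include <stdio.h>

/* The dependency edges introduced by this commit, in "A must complete
 * before B" form. */
struct dep {
  const char *before;
  const char *after;
};

int main(void) {
  const struct dep edges[] = {
      {"hydro.end_force", "send_force"},   /* send fully force-updated parts */
      {"hydro.drift", "send_force"},       /* drift before you send */
      {"hydro.sorts", "recv_force"},       /* finish sorting before overwrite */
      {"hydro.force pairs", "recv_force"}, /* consume the old data first */
      {"recv_force", "stars.density"},     /* star loop sees drifted data */
  };
  const int n = (int)(sizeof(edges) / sizeof(edges[0]));
  for (int i = 0; i < n; i++)
    printf("%-18s -> %s\n", edges[i].before, edges[i].after);
  return 0;
}
```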
@@ -3098,7 +3098,7 @@ int cell_unskip_hydro_tasks(struct cell *c, struct scheduler *s) {
}
/* Store current values of dx_max and h_max. */
else if (t->type == task_type_sub_pair || t->type == task_type_sub_self) {
cell_activate_subcell_hydro_tasks(t->ci, t->cj, s);
cell_activate_subcell_hydro_tasks(ci, cj, s);
}
}
@@ -3402,11 +3402,11 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
struct engine *e = s->space->e;
const int with_feedback = (e->policy & engine_policy_feedback);
// const int with_feedback = (e->policy & engine_policy_feedback);
const int nodeID = e->nodeID;
int rebuild = 0;
if (!with_feedback && c->stars.drift != NULL && cell_is_active_stars(c, e)) {
if (c->stars.drift != NULL && cell_is_active_stars(c, e)) {
cell_activate_drift_spart(c, s);
}
@@ -3434,6 +3434,8 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Only activate tasks that involve a local active cell. */
if ((ci_active || cj_active) &&
(ci_nodeID == nodeID || cj_nodeID == nodeID)) {
// if ((ci_active && ci_nodeID == nodeID) ||
// (cj_active && cj_nodeID == nodeID)) {
scheduler_activate(s, t);
@@ -3497,7 +3499,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
if (cj_active) {
scheduler_activate(s, ci->mpi.hydro.recv_xv);
scheduler_activate(s, ci->mpi.hydro.recv_rho);
scheduler_activate(s, ci->mpi.hydro.recv_force);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
@@ -3515,7 +3517,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
scheduler_activate_send(s, cj->mpi.hydro.send_force, ci_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
@@ -3527,7 +3529,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active) {
scheduler_activate(s, cj->mpi.hydro.recv_xv);
scheduler_activate(s, cj->mpi.hydro.recv_rho);
scheduler_activate(s, cj->mpi.hydro.recv_force);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
@@ -3545,7 +3547,7 @@ int cell_unskip_stars_tasks(struct cell *c, struct scheduler *s) {
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
scheduler_activate_send(s, ci->mpi.hydro.send_force, cj_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
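The same activation pattern recurs below in engine_marktasks_mapper: for a pair task straddling a node boundary, an active local cell must now also activate the force receive on the foreign cell (and the matching send on its own side). A compilable toy of that branch, with invented types and an activate() that only prints:

```c
#include <stdio.h>

struct toy_cell { int active; int nodeID; };

static void activate(const char *task) { printf("activate %s\n", task); }

/* If the local cell is active, pull xv, rho and (new in this commit) force
 * data from the foreign cell, and send the local star data back. */
static void unskip_pair(const struct toy_cell *local,
                        const struct toy_cell *foreign, int nodeID) {
  (void)foreign;
  if (local->nodeID == nodeID && local->active) {
    activate("foreign recv_xv");
    activate("foreign recv_rho");
    activate("foreign recv_force"); /* the task this commit adds */
    activate("local stars send");
  }
}

int main(void) {
  const struct toy_cell ci = {1, 0}, cj = {0, 1};
  unskip_pair(&ci, &cj, /*nodeID=*/0);
  return 0;
}
```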
@@ -594,6 +594,9 @@ struct cell {
/* Task receiving hydro data (gradient). */
struct task *recv_gradient;
/* Task receiving hydro data (force). */
struct task *recv_force;
/* Linked list for sending hydro data (positions). */
struct link *send_xv;
@@ -603,6 +606,9 @@ struct cell {
/* Linked list for sending hydro data (gradient). */
struct link *send_gradient;
/* Linked list for sending hydro data (force). */
struct link *send_force;
} hydro;
struct {
@@ -85,7 +85,7 @@ __attribute__((always_inline)) INLINE static void drift_part(
p->ti_drift = ti_current;
#endif
/* Drift... */
p->x[0] += xp->v_full[0] * dt_drift;
p->x[1] += xp->v_full[1] * dt_drift;
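For context, the drift kernel touched above simply advances particle positions with the full-step velocity over the drift interval. A self-contained sketch of the same operation, with toy types standing in for SWIFT's part/xpart:

```c
#include <stdio.h>

struct toy_part { double x[3]; };
struct toy_xpart { float v_full[3]; };

/* Advance positions by v_full * dt_drift, as in drift_part() above. */
static void toy_drift(struct toy_part *p, const struct toy_xpart *xp,
                      double dt_drift) {
  for (int k = 0; k < 3; k++) p->x[k] += xp->v_full[k] * dt_drift;
}

int main(void) {
  struct toy_part p = {{1.0, 2.0, 3.0}};
  const struct toy_xpart xp = {{0.5f, -0.25f, 0.0f}};
  toy_drift(&p, &xp, 0.1); /* x becomes {1.05, 1.975, 3.0} */
  printf("%g %g %g\n", p.x[0], p.x[1], p.x[2]);
  return 0;
}
```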
@@ -120,7 +120,8 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
*/
void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
struct cell *cj, struct task *t_xv,
struct task *t_rho, struct task *t_gradient) {
struct task *t_rho, struct task *t_gradient,
struct task *t_force) {
#ifdef WITH_MPI
struct link *l = NULL;
@@ -146,11 +147,15 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
0, ci, cj);
t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
ci->mpi.tag, 0, ci, cj);
#ifdef EXTRA_HYDRO_LOOP
t_gradient = scheduler_addtask(s, task_type_send, task_subtype_gradient,
ci->mpi.tag, 0, ci, cj);
#endif
t_force = scheduler_addtask(s, task_type_send, task_subtype_force,
ci->mpi.tag, 0, ci, cj);
#ifdef EXTRA_HYDRO_LOOP
scheduler_addunlock(s, t_gradient, ci->hydro.super->hydro.end_force);
@@ -179,6 +184,9 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
#endif
scheduler_addunlock(s, ci->hydro.super->hydro.end_force, t_force);
scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_force);
/* Drift before you send */
scheduler_addunlock(s, ci->hydro.super->hydro.drift, t_xv);
}
@@ -189,6 +197,7 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
#ifdef EXTRA_HYDRO_LOOP
engine_addlink(e, &ci->mpi.hydro.send_gradient, t_gradient);
#endif
engine_addlink(e, &ci->mpi.hydro.send_force, t_force);
}
/* Recurse? */
@@ -196,7 +205,7 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
for (int k = 0; k < 8; k++)
if (ci->progeny[k] != NULL)
engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
t_gradient);
t_gradient, t_force);
#else
error("SWIFT was not compiled with MPI support.");
@@ -355,7 +364,7 @@ void engine_addtasks_send_timestep(struct engine *e, struct cell *ci,
*/
void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
struct task *t_xv, struct task *t_rho,
struct task *t_gradient) {
struct task *t_gradient, struct task *t_force) {
#ifdef WITH_MPI
struct scheduler *s = &e->sched;
@@ -377,14 +386,21 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
t_gradient = scheduler_addtask(s, task_type_recv, task_subtype_gradient,
c->mpi.tag, 0, c, NULL);
#endif
t_force = scheduler_addtask(s, task_type_recv, task_subtype_force,
c->mpi.tag, 0, c, NULL);
}
c->mpi.hydro.recv_xv = t_xv;
c->mpi.hydro.recv_rho = t_rho;
c->mpi.hydro.recv_gradient = t_gradient;
c->mpi.hydro.recv_force = t_force;
/* Add dependencies. */
if (c->hydro.sorts != NULL) scheduler_addunlock(s, t_xv, c->hydro.sorts);
if (c->hydro.sorts != NULL) {
scheduler_addunlock(s, t_xv, c->hydro.sorts);
scheduler_addunlock(s, c->hydro.sorts, t_force);
}
for (struct link *l = c->hydro.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_xv, l->t);
@@ -404,16 +420,20 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
}
#endif
for (struct link *l = c->hydro.force; l != NULL; l = l->next) {
scheduler_addunlock(s, l->t, t_force);
}
for (struct link *l = c->stars.density; l != NULL; l = l->next) {
scheduler_addunlock(s, t_xv, l->t);
scheduler_addunlock(s, t_rho, l->t);
scheduler_addunlock(s, t_force, l->t);
}
/* Recurse? */
if (c->split)
for (int k = 0; k < 8; k++)
if (c->progeny[k] != NULL)
engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient);
engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
t_force);
#else
error("SWIFT was not compiled with MPI support.");
@@ -847,7 +867,7 @@ void engine_make_hierarchical_tasks_hydro(struct engine *e, struct cell *c) {
c->stars.drift = scheduler_addtask(s, task_type_drift_spart,
task_subtype_none, 0, 0, c, NULL);
scheduler_addunlock(s, c->stars.drift, c->super->kick2);
}
/* Subgrid tasks: cooling */
@@ -2173,7 +2193,7 @@ void engine_addtasks_send_mapper(void *map_data, int num_elements,
* connection. */
if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
engine_addtasks_send_hydro(e, ci, cj, /*t_xv=*/NULL,
/*t_rho=*/NULL, /*t_gradient=*/NULL);
/*t_rho=*/NULL, /*t_gradient=*/NULL, NULL);
/* Add the send tasks for the cells in the proxy that have a stars
* connection. */
@@ -2205,7 +2225,7 @@ void engine_addtasks_recv_mapper(void *map_data, int num_elements,
/* Add the recv tasks for the cells in the proxy that have a hydro
* connection. */
if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
engine_addtasks_recv_hydro(e, ci, NULL, NULL, NULL);
engine_addtasks_recv_hydro(e, ci, NULL, NULL, NULL, NULL);
/* Add the recv tasks for the cells in the proxy that have a stars
* connection. */
@@ -445,7 +445,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
if (cj_active_stars) {
scheduler_activate(s, ci->mpi.hydro.recv_xv);
scheduler_activate(s, ci->mpi.hydro.recv_rho);
scheduler_activate(s, ci->mpi.hydro.recv_force);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, cj->mpi.stars.send, ci_nodeID);
@@ -463,7 +463,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, cj->mpi.hydro.send_xv, ci_nodeID);
scheduler_activate_send(s, cj->mpi.hydro.send_rho, ci_nodeID);
scheduler_activate_send(s, cj->mpi.hydro.send_force, ci_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
@@ -475,7 +475,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
/* If the local cell is active, receive data from the foreign cell. */
if (ci_active_stars) {
scheduler_activate(s, cj->mpi.hydro.recv_xv);
scheduler_activate(s, cj->mpi.hydro.recv_rho);
scheduler_activate(s, cj->mpi.hydro.recv_force);
/* If the local cell is active, more stuff will be needed. */
scheduler_activate_send(s, ci->mpi.stars.send, cj_nodeID);
@@ -493,7 +493,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
/* Is the foreign cell active and will need stuff from us? */
scheduler_activate_send(s, ci->mpi.hydro.send_xv, cj_nodeID);
scheduler_activate_send(s, ci->mpi.hydro.send_rho, cj_nodeID);
scheduler_activate_send(s, ci->mpi.hydro.send_force, cj_nodeID);
/* Drift the cell which will be sent; note that not all sent
particles will be drifted, only those that are needed. */
@@ -54,7 +54,7 @@ extern int engine_rank;
fprintf(stderr, "[%04i] %s %s:%s():%i: " s "\n", engine_rank, \
clocks_get_timesincestart(), __FILE__, __FUNCTION__, __LINE__, \
##__VA_ARGS__); \
MPI_Abort(MPI_COMM_WORLD, -1); \
swift_abort(-1); \
})
#else
#define error(s, ...) \
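The error macro now ends in swift_abort rather than MPI_Abort. Assuming the intent is to fail locally with a catchable signal, so a debugger can trap the failing rank or a core dump gets written, rather than tearing down the whole communicator at once, a helper along these lines would do; the actual SWIFT implementation may well differ:

```c
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for swift_abort: flush pending output, then raise
 * SIGABRT so the failure is catchable and can leave a core dump. */
static void my_swift_abort(int errcode) {
  fflush(stdout);
  fflush(stderr);
  raise(SIGABRT);
  exit(errcode); /* only reached if SIGABRT was caught and ignored */
}

int main(void) {
  my_swift_abort(-1); /* terminates the process, like the macro above */
  return 0;
}
```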
@@ -931,7 +931,7 @@ void runner_do_hydro_sort(struct runner *r, struct cell *c, int flags,
/* Fill the sort array. */
for (int k = 0; k < count; k++) {
const double px[3] = {parts[k].x[0], parts[k].x[1], parts[k].x[2]};
for (int j = 0; j < 13; j++)
if (flags & (1 << j)) {
c->hydro.sort[j][k].i = k;
@@ -3273,7 +3273,7 @@ void *runner_main(void *data) {
break;
#endif
case task_type_stars_ghost:
runner_do_stars_ghost(r, ci, 1);
break;
case task_type_drift_part:
runner_do_drift_part(r, ci, 1);
@@ -3321,6 +3321,8 @@ void *runner_main(void *data) {
runner_do_recv_part(r, ci, 0, 1);
} else if (t->subtype == task_subtype_gradient) {
runner_do_recv_part(r, ci, 0, 1);
} else if (t->subtype == task_subtype_force) {
runner_do_recv_part(r, ci, 0, 1);
} else if (t->subtype == task_subtype_limiter) {
runner_do_recv_part(r, ci, 0, 1);
} else if (t->subtype == task_subtype_gpart) {
@@ -1233,9 +1233,9 @@ void DOPAIR1_BRANCH(struct runner *r, struct cell *ci, struct cell *cj) {
error(
"particle shift diff exceeds dx_max_sort in cell ci. ci->nodeID=%d "
"cj->nodeID=%d d=%e sort_i[pid].d=%e ci->hydro.dx_max_sort=%e "
"ci->hydro.dx_max_sort_old=%e",
"ci->hydro.dx_max_sort_old=%e pid=%d count=%d",
ci->nodeID, cj->nodeID, d, sort_i[pid].d, ci->hydro.dx_max_sort,
ci->hydro.dx_max_sort_old);
ci->hydro.dx_max_sort_old, pid, ci->hydro.count);
}
for (int pjd = 0; pjd < cj->hydro.count; pjd++) {
const struct part *p = &cj->hydro.parts[sort_j[pjd].i];
@@ -1849,13 +1849,11 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
subtaskMPI_comms[t->subtype], &t->req);
} else if (t->subtype == task_subtype_xv ||
t->subtype == task_subtype_rho ||
t->subtype == task_subtype_gradient) {
t->subtype == task_subtype_gradient ||
t->subtype == task_subtype_force) {
err = MPI_Irecv(t->ci->hydro.parts, t->ci->hydro.count, part_mpi_type,
t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
&t->req);
// message( "receiving %i parts with tag=%i from %i to %i." ,
// t->ci->hydro.count , t->flags , t->ci->nodeID , s->nodeID );
// fflush(stdout);
} else if (t->subtype == task_subtype_gpart) {
err = MPI_Irecv(t->ci->grav.parts, t->ci->grav.count, gpart_mpi_type,
t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
@@ -1900,7 +1898,8 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
subtaskMPI_comms[t->subtype], &t->req);
} else if (t->subtype == task_subtype_xv ||
t->subtype == task_subtype_rho ||
t->subtype == task_subtype_gradient) {
t->subtype == task_subtype_gradient ||
t->subtype == task_subtype_force) {
if ((t->ci->hydro.count * sizeof(struct part)) > s->mpi_message_limit)
err = MPI_Isend(t->ci->hydro.parts, t->ci->hydro.count,
part_mpi_type, t->cj->nodeID, t->flags,
@@ -1909,9 +1908,6 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
err = MPI_Issend(t->ci->hydro.parts, t->ci->hydro.count,
part_mpi_type, t->cj->nodeID, t->flags,
subtaskMPI_comms[t->subtype], &t->req);
// message( "sending %i parts with tag=%i from %i to %i." ,
// t->ci->hydro.count , t->flags , s->nodeID , t->cj->nodeID );
// fflush(stdout);
} else if (t->subtype == task_subtype_gpart) {
if ((t->ci->grav.count * sizeof(struct gpart)) > s->mpi_message_limit)
err = MPI_Isend(t->ci->grav.parts, t->ci->grav.count,
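On the send side, the new force subtype also inherits the size-based policy visible above: payloads larger than s->mpi_message_limit go out with a plain MPI_Isend, smaller ones with the synchronous MPI_Issend, which only completes once the matching receive has been posted. A compilable sketch of that branch; the helper name and parameters are illustrative:

```c
#include <mpi.h>
#include <stddef.h>

/* Choose the send mode by payload size, mirroring the branch in
 * scheduler_enqueue: big messages use MPI_Isend, small ones MPI_Issend so
 * that completion implies the receiver has posted the matching recv. */
static int send_parts(void *buf, int count, size_t elem_size,
                      MPI_Datatype type, int dest, int tag, MPI_Comm comm,
                      size_t message_limit, MPI_Request *req) {
  if ((size_t)count * elem_size > message_limit)
    return MPI_Isend(buf, count, type, dest, tag, comm, req);
  return MPI_Issend(buf, count, type, dest, tag, comm, req);
}
```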
@@ -275,6 +275,7 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
c->mpi.hydro.recv_xv = NULL;
c->mpi.hydro.recv_rho = NULL;
c->mpi.hydro.recv_gradient = NULL;
c->mpi.hydro.recv_force = NULL;
c->mpi.grav.recv = NULL;
c->mpi.stars.recv = NULL;
c->mpi.recv_ti = NULL;
@@ -283,6 +284,7 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
c->mpi.hydro.send_xv = NULL;
c->mpi.hydro.send_rho = NULL;
c->mpi.hydro.send_gradient = NULL;
c->mpi.hydro.send_force = NULL;
c->mpi.grav.send = NULL;
c->mpi.stars.send = NULL;
c->mpi.send_ti = NULL;
@@ -550,9 +552,11 @@ void space_regrid(struct space *s, int verbose) {
c->mpi.hydro.recv_xv = NULL;
c->mpi.hydro.recv_rho = NULL;
c->mpi.hydro.recv_gradient = NULL;
c->mpi.hydro.recv_force = NULL;
c->mpi.hydro.send_xv = NULL;
c->mpi.hydro.send_rho = NULL;
c->mpi.hydro.send_gradient = NULL;
c->mpi.hydro.send_force = NULL;
c->mpi.stars.send = NULL;
c->mpi.stars.recv = NULL;
c->mpi.grav.recv = NULL;
@@ -688,7 +688,11 @@ void task_get_group_name(int type, int subtype, char *cluster) {
}
break;
case task_subtype_force:
strcpy(cluster, "Force");
if (type == task_type_send || type == task_type_recv) {
strcpy(cluster, "None");
} else {
strcpy(cluster, "Force");
}
break;
case task_subtype_grav:
strcpy(cluster, "Gravity");