Skip to content
Snippets Groups Projects
Commit 76587e71 authored by Matthieu Schaller's avatar Matthieu Schaller
Browse files

Traced the bug back to the splitting condition in scheduler_splittask not being symmetric because of rounding

Traced the bug back to the splitting condition in scheduler_splittask not being symmetric because of rounding
parent 20f107e9
No related branches found
No related tags found
1 merge request: !589 — MPI periodic gravity
......@@ -2514,6 +2514,12 @@ void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
/* Are the cells too close for a MM interaction ? */
if (!cell_can_use_pair_mm_rebuild(ci, cj, e, s)) {
if(ci->cellID == -111895 || cj->cellID == -111895) {
message("Constructing grav task! t->ci->cellID= %d t->cj->cellID= %d t->ci->nodeID= %d t->cj->nodeID= %d",
ci->cellID, cj->cellID, ci->nodeID, cj->nodeID);
}
/* Ok, we need to add a direct pair calculation */
scheduler_addtask(sched, task_type_pair, task_subtype_grav, 0, 0,
ci, cj);
......@@ -2700,6 +2706,14 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
engine_addlink(e, &ci->density, t);
engine_addlink(e, &cj->density, t);
} else if (t_subtype == task_subtype_grav) {
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Task linked to ci and cj");
}
engine_addlink(e, &ci->grav, t);
engine_addlink(e, &cj->grav, t);
}
......@@ -3268,6 +3282,25 @@ void engine_maketasks(struct engine *e) {
tic2 = getticks();
for(int i = 0; i<e->sched.nr_tasks; ++i) {
struct task *t = &e->sched.tasks[i];
if(t->type == task_type_pair && t->subtype == task_subtype_grav) {
struct cell *ci = t->ci;
struct cell *cj = t->cj;
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Found the task!");
}
}
}
/* Split the tasks. */
scheduler_splittasks(sched);
......@@ -4878,6 +4911,39 @@ void engine_step(struct engine *e) {
/* Print the number of active tasks ? */
if (e->step == 43) engine_print_task_counts(e);
if (e->step == 43) {
for(int i = 0; i < e->s->nr_cells; ++i) {
const struct cell *c = &e->s->cells_top[i];
if(c->cellID == -111895) {
message("c->loc= [%f %f %f]", c->loc[0], c->loc[1], c->loc[2]);
message("c->depth= %d", c->depth);
message("c->nodeID= %d", c->nodeID);
message("c->gcount= %d c->count= %d c->scount= %d", c->gcount, c->count, c->scount);
message("c->ti_hydro_end_min= %lld c->ti_gravity_end_min= %lld", c->ti_hydro_end_min, c->ti_gravity_end_min);
#ifdef WITH_MPI
message("c->recv_grav= %p", c->recv_grav);
if(c->recv_grav)
message("c->recv_grav->skip= %d c->recv_grav->wait= %d", c->recv_grav->skip, c->recv_grav->wait);
if(c->send_grav)
for(struct link *l = c->send_grav; l!=NULL; l = l->next)
message("Send task: t->cj->nodeID=%d t->skip=%d", l->t->cj->nodeID, l->t->skip);
if(c->grav)
for(struct link *l = c->grav; l!=NULL; l = l->next)
if(l->t->type == task_type_pair)
message("grav task t->wait=%d t->skip=%d t->ci->cellID= %d t->cj->cellID= %d t->ci->nodeID= %d t->cj->nodeID= %d",
l->t->wait, l->t->skip, l->t->ci->cellID, l->t->cj->cellID, l->t->ci->nodeID, l->t->cj->nodeID);
#endif
}
}
}
/* Dump local cells and active particle counts. */
/* dumpCells("cells", 0, 0, 0, 0, e->s, e->nodeID, e->step); */
......
......@@ -888,12 +888,28 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
break;
}
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Found the task! ci->cellID=%d cj->cellID=%d ci->nodeID=%d cj->nodeID=%d ci->gcount=%d cj->gcount=%d ci->split=%d cj->split=%d ci->depth=%d cj->depth=%d",
ci->cellID, cj->cellID, ci->nodeID, cj->nodeID, ci->gcount, cj->gcount,
ci->split, cj->split, ci->depth, cj->depth);
}
/* Should we replace it with an M-M task? */
if (cell_can_use_pair_mm_rebuild(ci, cj, e, sp)) {
t->type = task_type_grav_mm;
t->subtype = task_subtype_none;
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Replaced by M-M task!");
}
/* Since this task will not be split, we can already link it */
atomic_inc(&ci->nr_mm_tasks);
atomic_inc(&cj->nr_mm_tasks);
......@@ -906,9 +922,20 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
if (cell_can_split_pair_gravity_task(ci) &&
cell_can_split_pair_gravity_task(cj)) {
const long long gcount_i = ci->gcount;
const long long gcount_j = cj->gcount;
/* Replace by a single sub-task? */
if (scheduler_dosub && /* Use division to avoid integer overflow. */
ci->gcount < space_subsize_pair_grav / cj->gcount) {
gcount_i * gcount_j < ((long long) space_subsize_pair_grav)) {
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Do nothing!");
}
/* Otherwise, split it. */
} else {
......@@ -916,6 +943,13 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
/* Take a step back (we're going to recycle the current task)... */
redo = 1;
if((ci->cellID == -91806 && cj->cellID == -111895) ||
(cj->cellID == -91806 && ci->cellID == -111895)) {
message("Split into smaller tasks!");
}
/* Find the first non-empty childrens of the cells */
int first_ci_child = 0, first_cj_child = 0;
while (ci->progeny[first_ci_child] == NULL) first_ci_child++;
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment.