Commit 19caade8 authored by Peter W. Draper

Merge branch 'merged_mm_tasks' into 'master'

Merged gravity M-M tasks

See merge request !606
parents a0e20aed f45ad952
@@ -1059,7 +1059,7 @@ int main(int argc, char *argv[]) {
         if (!e.sched.tasks[l].implicit && e.sched.tasks[l].toc != 0) {
           fprintf(
               file_thread,
-              " %03i %i %i %i %i %lli %lli %i %i %i %i %i %i\n", myrank,
+              " %03i %i %i %i %i %lli %lli %i %i %i %i %lli %i\n", myrank,
               e.sched.tasks[l].rid, e.sched.tasks[l].type,
               e.sched.tasks[l].subtype, (e.sched.tasks[l].cj == NULL),
               e.sched.tasks[l].tic, e.sched.tasks[l].toc,
@@ -129,7 +129,7 @@ FULLTYPES = ["self/force", "self/density", "self/grav", "sub_self/force",
 colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
            "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
            "brown", "purple", "moccasin", "olivedrab", "chartreuse",
-           "darksage", "darkgreen", "green", "mediumseagreen",
+           "olive", "darkgreen", "green", "mediumseagreen",
            "mediumaquamarine", "darkslategrey", "mediumturquoise",
            "black", "cadetblue", "skyblue", "red", "slategray", "gold",
            "slateblue", "blueviolet", "mediumorchid", "firebrick",
@@ -96,7 +96,7 @@ pl.rcParams.update(PLOT_PARAMS)
 colours = ["cyan", "lightgray", "darkblue", "yellow", "tan", "dodgerblue",
            "sienna", "aquamarine", "bisque", "blue", "green", "lightgreen",
            "brown", "purple", "moccasin", "olivedrab", "chartreuse",
-           "darksage", "darkgreen", "green", "mediumseagreen",
+           "olive", "darkgreen", "green", "mediumseagreen",
            "mediumaquamarine", "darkslategrey", "mediumturquoise",
            "black", "cadetblue", "skyblue", "red", "slategray", "gold",
            "slateblue", "blueviolet", "mediumorchid", "firebrick",
@@ -2017,33 +2017,6 @@ void cell_activate_subcell_hydro_tasks(struct cell *ci, struct cell *cj,
   } /* Otherwise, pair interaction */
 }
 
-/**
- * @brief Drift the multipoles that will be used in a M-M task.
- *
- * @param ci The first #cell we update.
- * @param cj The second #cell we update.
- * @param s The task #scheduler.
- */
-void cell_activate_grav_mm_task(struct cell *ci, struct cell *cj,
-                                struct scheduler *s) {
-  /* Some constants */
-  const struct engine *e = s->space->e;
-
-  /* Anything to do here? */
-  if (!cell_is_active_gravity_mm(ci, e) && !cell_is_active_gravity_mm(cj, e))
-    error("Inactive MM task being activated");
-
-  /* Atomically drift the multipole in ci */
-  lock_lock(&ci->mlock);
-  if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
-  if (lock_unlock(&ci->mlock) != 0) error("Impossible to unlock m-pole");
-
-  /* Atomically drift the multipole in cj */
-  lock_lock(&cj->mlock);
-  if (cj->ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
-  if (lock_unlock(&cj->mlock) != 0) error("Impossible to unlock m-pole");
-}
-
 /**
  * @brief Traverse a sub-cell task and activate the gravity drift tasks that
  * are required by a self gravity task.
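The deleted helper was the only activation-time drift of the M-M multipoles; in this branch the drift moves into the runner itself (see `runner_dopair_grav_mm` further down). A minimal sketch of the on-demand pattern that replaces the lock-protected helper:

```c
/* Drift-on-demand inside the M-M task itself: each task drifts the two
 * multipoles it needs just before interacting, so no separate
 * lock-protected drift at activation time is required any more. */
if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
if (cj->ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
```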
@@ -2490,7 +2463,7 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
     struct cell *ci = t->ci;
     struct cell *cj = t->cj;
     const int ci_active = cell_is_active_gravity_mm(ci, e);
-    const int cj_active = (cj != NULL) ? cell_is_active_gravity_mm(cj, e) : 0;
+    const int cj_active = cell_is_active_gravity_mm(cj, e);
 #ifdef WITH_MPI
     const int ci_nodeID = ci->nodeID;
     const int cj_nodeID = (cj != NULL) ? cj->nodeID : -1;
@@ -2508,9 +2481,6 @@ int cell_unskip_gravity_tasks(struct cell *c, struct scheduler *s) {
         (cj_active && cj_nodeID == nodeID)) {
       scheduler_activate(s, t);
-
-      /* Drift the multipoles */
-      cell_activate_grav_mm_task(ci, cj, s);
     }
   }
@@ -3066,6 +3036,9 @@ int cell_can_use_pair_mm(const struct cell *ci, const struct cell *cj,
 /**
  * @brief Can we use the MM interactions for a given pair of cells?
  *
+ * This function uses the information gathered in the multipole at rebuild
+ * time and not the current position and radius of the multipole.
+ *
  * @param ci The first #cell.
  * @param cj The second #cell.
  * @param e The #engine.
@@ -3085,32 +3058,32 @@ int cell_can_use_pair_mm_rebuild(const struct cell *ci, const struct cell *cj,
 #ifdef SWIFT_DEBUG_CHECKS
-  if (multi_i->CoM[0] < ci->loc[0] ||
-      multi_i->CoM[0] > ci->loc[0] + ci->width[0])
+  if (multi_i->CoM_rebuild[0] < ci->loc[0] ||
+      multi_i->CoM_rebuild[0] > ci->loc[0] + ci->width[0])
     error("Invalid multipole position ci");
-  if (multi_i->CoM[1] < ci->loc[1] ||
-      multi_i->CoM[1] > ci->loc[1] + ci->width[1])
+  if (multi_i->CoM_rebuild[1] < ci->loc[1] ||
+      multi_i->CoM_rebuild[1] > ci->loc[1] + ci->width[1])
     error("Invalid multipole position ci");
-  if (multi_i->CoM[2] < ci->loc[2] ||
-      multi_i->CoM[2] > ci->loc[2] + ci->width[2])
+  if (multi_i->CoM_rebuild[2] < ci->loc[2] ||
+      multi_i->CoM_rebuild[2] > ci->loc[2] + ci->width[2])
     error("Invalid multipole position ci");
-  if (multi_j->CoM[0] < cj->loc[0] ||
-      multi_j->CoM[0] > cj->loc[0] + cj->width[0])
+  if (multi_j->CoM_rebuild[0] < cj->loc[0] ||
+      multi_j->CoM_rebuild[0] > cj->loc[0] + cj->width[0])
     error("Invalid multipole position cj");
-  if (multi_j->CoM[1] < cj->loc[1] ||
-      multi_j->CoM[1] > cj->loc[1] + cj->width[1])
+  if (multi_j->CoM_rebuild[1] < cj->loc[1] ||
+      multi_j->CoM_rebuild[1] > cj->loc[1] + cj->width[1])
     error("Invalid multipole position cj");
-  if (multi_j->CoM[2] < cj->loc[2] ||
-      multi_j->CoM[2] > cj->loc[2] + cj->width[2])
+  if (multi_j->CoM_rebuild[2] < cj->loc[2] ||
+      multi_j->CoM_rebuild[2] > cj->loc[2] + cj->width[2])
     error("Invalid multipole position cj");
 #endif
 
   /* Get the distance between the CoMs */
-  double dx = multi_i->CoM[0] - multi_j->CoM[0];
-  double dy = multi_i->CoM[1] - multi_j->CoM[1];
-  double dz = multi_i->CoM[2] - multi_j->CoM[2];
+  double dx = multi_i->CoM_rebuild[0] - multi_j->CoM_rebuild[0];
+  double dy = multi_i->CoM_rebuild[1] - multi_j->CoM_rebuild[1];
+  double dz = multi_i->CoM_rebuild[2] - multi_j->CoM_rebuild[2];
 
   /* Apply BC */
   if (periodic) {
@@ -3120,5 +3093,6 @@ int cell_can_use_pair_mm_rebuild(const struct cell *ci, const struct cell *cj,
   }
 
   const double r2 = dx * dx + dy * dy + dz * dz;
 
-  return gravity_M2L_accept(multi_i->r_max, multi_j->r_max, theta_crit2, r2);
+  return gravity_M2L_accept(multi_i->r_max_rebuild, multi_j->r_max_rebuild,
+                            theta_crit2, r2);
 }
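Both variants of the check end in the same opening-angle test; only the inputs differ (the `_rebuild` values here). A minimal sketch of that criterion, assuming `gravity_M2L_accept` implements the standard multipole-acceptance form (the real header may differ in detail):

```c
/* Sketch of the opening-angle acceptance test: accept the pair for an
 * M-M interaction if the combined multipole size subtends less than the
 * critical angle, i.e. (r_max_i + r_max_j)^2 < theta_crit^2 * r^2. */
static int m2l_accept_sketch(double r_max_i, double r_max_j,
                             double theta_crit2, double r2) {
  const double size = r_max_i + r_max_j;
  return (size * size < theta_crit2 * r2);
}
```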
@@ -363,7 +363,7 @@ void engine_make_hierarchical_tasks_gravity(struct engine *e, struct cell *c) {
   }
 
   /* We are below the super-cell but not below the maximal splitting depth */
-  else if (c->super_gravity != NULL && c->depth <= space_subdepth_grav) {
+  else if (c->super_gravity != NULL && c->depth < space_subdepth_grav) {
 
     /* Local tasks only... */
     if (c->nodeID == e->nodeID) {
@@ -2674,9 +2674,13 @@ void engine_count_and_link_tasks_mapper(void *map_data, int num_elements,
       }
 #endif
 
-      /* Note that we do not need to link the M-M tasks */
-      /* since we already did so when splitting the gravity */
-      /* tasks. */
+      /* Multipole-multipole interaction of progenies */
+    } else if (t_type == task_type_grav_mm) {
+      atomic_inc(&ci->nr_mm_tasks);
+      atomic_inc(&cj->nr_mm_tasks);
+      engine_addlink(e, &ci->grav_mm, t);
+      engine_addlink(e, &cj->grav_mm, t);
+    }
     }
   }
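Each cell keeps a linked list of the tasks that touch it; `engine_addlink` prepends the M-M task to the cell's `grav_mm` list so that unskipping can later walk every M-M task involving that cell, while `nr_mm_tasks` counts them for sanity checks. A sketch of the underlying pattern, assuming SWIFT's usual `struct link` layout (the real version draws links from a pre-allocated pool rather than malloc):

```c
#include <stdlib.h>

struct task; /* opaque here; the real struct lives in task.h */

/* Minimal sketch of the per-cell task list behind ci->grav_mm. */
struct link {
  struct task *t;    /* the task touching this cell */
  struct link *next; /* next link in the cell's list */
};

static void addlink_sketch(struct link **list, struct task *t) {
  struct link *l = malloc(sizeof(*l)); /* SWIFT uses a pre-allocated pool */
  l->t = t;
  l->next = *list; /* prepend in O(1); order is irrelevant */
  *list = l;
}
```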
@@ -2705,6 +2709,10 @@ void engine_link_gravity_tasks(struct engine *e) {
     const enum task_types t_type = t->type;
     const enum task_subtypes t_subtype = t->subtype;
 
+    struct cell *ci_parent = (ci->parent != NULL) ? ci->parent : ci;
+    struct cell *cj_parent =
+        (cj != NULL && cj->parent != NULL) ? cj->parent : cj;
+
 /* Node ID (if running with MPI) */
 #ifdef WITH_MPI
     const int ci_nodeID = ci->nodeID;
@@ -2724,8 +2732,8 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* drift ---+-> gravity --> grav_down */
         /* init --/ */
         scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
+        scheduler_addunlock(sched, ci_parent->init_grav_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav_down_in);
       }
/* Self-interaction for external gravity ? */
@@ -2748,8 +2756,8 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* drift ---+-> gravity --> grav_down */
         /* init --/ */
         scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
+        scheduler_addunlock(sched, ci_parent->init_grav_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav_down_in);
       }
if (cj_nodeID == nodeID) {
@@ -2757,8 +2765,11 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* init --/ */
         if (ci->super_gravity != cj->super_gravity) /* Avoid double unlock */
           scheduler_addunlock(sched, cj->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->init_grav_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav_down_in);
+        }
       }
     }
@@ -2771,8 +2782,8 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* drift ---+-> gravity --> grav_down */
         /* init --/ */
         scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
+        scheduler_addunlock(sched, ci_parent->init_grav_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav_down_in);
       }
/* Sub-self-interaction for external gravity ? */
@@ -2796,8 +2807,8 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* drift ---+-> gravity --> grav_down */
         /* init --/ */
         scheduler_addunlock(sched, ci->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
+        scheduler_addunlock(sched, ci_parent->init_grav_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav_down_in);
       }
if (cj_nodeID == nodeID) {
@@ -2805,8 +2816,11 @@ void engine_link_gravity_tasks(struct engine *e) {
         /* init --/ */
         if (ci->super_gravity != cj->super_gravity) /* Avoid double unlock */
           scheduler_addunlock(sched, cj->super_gravity->drift_gpart, t);
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->init_grav_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav_down_in);
+        }
       }
     }
@@ -2816,14 +2830,16 @@ void engine_link_gravity_tasks(struct engine *e) {
       if (ci_nodeID == nodeID) {
 
         /* init -----> gravity --> grav_down */
-        scheduler_addunlock(sched, ci->init_grav_out, t);
-        scheduler_addunlock(sched, t, ci->grav_down_in);
+        scheduler_addunlock(sched, ci_parent->init_grav_out, t);
+        scheduler_addunlock(sched, t, ci_parent->grav_down_in);
       }
       if (cj_nodeID == nodeID) {
 
         /* init -----> gravity --> grav_down */
-        scheduler_addunlock(sched, cj->init_grav_out, t);
-        scheduler_addunlock(sched, t, cj->grav_down_in);
+        if (ci_parent != cj_parent) { /* Avoid double unlock */
+          scheduler_addunlock(sched, cj_parent->init_grav_out, t);
+          scheduler_addunlock(sched, t, cj_parent->grav_down_in);
+        }
       }
     }
   }
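The recurring change in this function is twofold: the implicit `init_grav_out`/`grav_down_in` tasks a gravity task hooks into now live one level up (a grav_mm task produced by splitting operates on progeny cells), and a guard avoids registering the same dependency twice when both progenies share a parent. Condensed into one hedged sketch (names as in the diff; the real function also handles drifts, MPI node IDs and the self/sub-self cases):

```c
/* Condensed view of the new linking pattern for a pair-type gravity
 * task t between cells ci and cj. Sketch only. */
struct cell *ci_parent = (ci->parent != NULL) ? ci->parent : ci;
struct cell *cj_parent = (cj != NULL && cj->parent != NULL) ? cj->parent : cj;

/* init_grav_out ---> t ---> grav_down_in, attached to the parent. */
scheduler_addunlock(sched, ci_parent->init_grav_out, t);
scheduler_addunlock(sched, t, ci_parent->grav_down_in);

/* If both progenies hang off the same parent, the pair above already
 * covers cj; adding it again would create a duplicate dependency. */
if (ci_parent != cj_parent) {
  scheduler_addunlock(sched, cj_parent->init_grav_out, t);
  scheduler_addunlock(sched, t, cj_parent->grav_down_in);
}
```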
@@ -3278,6 +3294,8 @@ void engine_maketasks(struct engine *e) {
   /* Add the communication tasks if MPI is being used. */
   if (e->policy & engine_policy_mpi) {
 
     tic2 = getticks();
 
+    /* Loop over the proxies and add the send tasks, which also generates the
+     * cell tags for super-cells. */
     for (int pid = 0; pid < e->nr_proxies; pid++) {
@@ -3306,9 +3324,21 @@ void engine_maketasks(struct engine *e) {
                                  NULL);
     }
 
+    if (e->verbose)
+      message("Creating send tasks took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
 
+    tic2 = getticks();
 
+    /* Exchange the cell tags. */
+    proxy_tags_exchange(e->proxies, e->nr_proxies, s);
 
+    if (e->verbose)
+      message("Exchanging cell tags took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
 
+    tic2 = getticks();
 
+    /* Loop over the proxies and add the recv tasks, which relies on having
+     * the cell tags. */
     for (int pid = 0; pid < e->nr_proxies; pid++) {
@@ -3334,6 +3364,10 @@ void engine_maketasks(struct engine *e) {
         if (p->cells_in_type[k] & proxy_cell_type_gravity)
           engine_addtasks_recv_gravity(e, p->cells_in[k], NULL);
       }
 
+    if (e->verbose)
+      message("Creating recv tasks took %.3f %s.",
+              clocks_from_ticks(getticks() - tic2), clocks_getunit());
   }
 #endif
@@ -39,6 +39,7 @@
 /* Local headers. */
 #include "cell.h"
 #include "engine.h"
+#include "error.h"
 #include "space.h"
@@ -62,6 +63,8 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
 #ifdef WITH_MPI
 
+  ticks tic2 = getticks();
+
   /* Run through the cells and get the size of the tags that will be sent off.
    */
   int count_out = 0;
@@ -99,6 +102,10 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
     }
   }
 
+  if (s->e->verbose)
+    message("Cell pack tags took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Allocate the incoming and outgoing request handles. */
   int num_reqs_out = 0;
   int num_reqs_in = 0;
@@ -138,6 +145,8 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
     }
   }
 
+  tic2 = getticks();
+
   /* Wait for each recv and unpack the tags into the local cells. */
   for (int k = 0; k < num_reqs_in; k++) {
     int pid = MPI_UNDEFINED;
@@ -149,6 +158,10 @@ void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
     cell_unpack_tags(&tags_in[offset_in[cid]], &s->cells_top[cid]);
   }
 
+  if (s->e->verbose)
+    message("Cell unpack tags took %.3f %s.",
+            clocks_from_ticks(getticks() - tic2), clocks_getunit());
+
   /* Wait for all the sends to have completed. */
   if (MPI_Waitall(num_reqs_out, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
     error("MPI_Waitall on sends failed.");
@@ -2318,7 +2318,7 @@ void *runner_main(void *data) {
           runner_do_grav_long_range(r, t->ci, 1);
           break;
         case task_type_grav_mm:
-          runner_dopair_grav_mm(r, t->ci, t->cj);
+          runner_dopair_grav_mm_progenies(r, t->flags, t->ci, t->cj);
           break;
         case task_type_cooling:
           runner_do_cooling(r, t->ci, 1);
@@ -1289,19 +1289,57 @@ static INLINE void runner_dopair_grav_mm(struct runner *r,
   const struct engine *e = r->e;
 
   /* What do we need to do? */
   const int do_i =
       cell_is_active_gravity_mm(ci, e) && (ci->nodeID == e->nodeID);
   const int do_j =
       cell_is_active_gravity_mm(cj, e) && (cj->nodeID == e->nodeID);
 
+  /* Do we need drifting first? */
+  if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
+  if (cj->ti_old_multipole < e->ti_current) cell_drift_multipole(cj, e);
+
   /* Interact! */
   if (do_i && do_j)
     runner_dopair_grav_mm_symmetric(r, ci, cj);
   else if (do_i)
     runner_dopair_grav_mm_nonsym(r, ci, cj);
   else if (do_j)
     runner_dopair_grav_mm_nonsym(r, cj, ci);
   else
     error("Running M-M task with two inactive cells.");
 }
 
+/**
+ * @brief Computes all the M-M interactions between all the well-separated (at
+ * rebuild) pairs of progenies of the two cells.
+ *
+ * @param r The #runner thread.
+ * @param flags The task flag containing the list of well-separated pairs as a
+ * bit-field.
+ * @param ci The first #cell.
+ * @param cj The second #cell.
+ */
+static INLINE void runner_dopair_grav_mm_progenies(struct runner *r,
+                                                   const long long flags,
+                                                   struct cell *restrict ci,
+                                                   struct cell *restrict cj) {
+
+  /* Loop over all pairs of progenies */
+  for (int i = 0; i < 8; i++) {
+    if (ci->progeny[i] != NULL) {
+      for (int j = 0; j < 8; j++) {
+        if (cj->progeny[j] != NULL) {
+
+          struct cell *cpi = ci->progeny[i];
+          struct cell *cpj = cj->progeny[j];
+
+          const int flag = i * 8 + j;
+
+          /* Did we agree to use an M-M interaction here at the last rebuild? */
+          if (flags & (1LL << flag)) runner_dopair_grav_mm(r, cpi, cpj);
+        }
+      }
+    }
+  }
+}
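A worked example of the `flags` encoding may help: progeny pair (i, j) maps to bit `i * 8 + j` of the 64-bit task flags, so the split-time side (in the scheduler, below) and this run-time side only have to agree on that one convention. The pair values here are hypothetical:

```c
#include <stdio.h>

int main(void) {
  /* Encode: suppose progeny pairs (0,3) and (2,5) were found
   * well separated at the last rebuild. */
  long long flags = 0;
  flags |= (1LL << (0 * 8 + 3)); /* bit 3  */
  flags |= (1LL << (2 * 8 + 5)); /* bit 21 */

  /* Decode: walk all 8x8 progeny pairs and test each bit. */
  for (int i = 0; i < 8; i++)
    for (int j = 0; j < 8; j++)
      if (flags & (1LL << (i * 8 + j)))
        printf("M-M interaction for progeny pair (%d, %d)\n", i, j);
  return 0;
}
```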
static INLINE void runner_dopair_recursive_grav_pm(struct runner *r,
@@ -1628,16 +1666,15 @@ static INLINE void runner_do_grav_long_range(struct runner *r, struct cell *ci,
     error("Non-local cell in long-range gravity task!");
 
   /* Check multipole has been drifted */
-  if (ci->ti_old_multipole != e->ti_current)
-    error("Interacting un-drifted multipole");
+  if (ci->ti_old_multipole < e->ti_current) cell_drift_multipole(ci, e);
 
-  /* Get this cell's multipole information */
-  struct gravity_tensors *const multi_i = ci->multipole;
 
   /* Find this cell's top-level (great-)parent */
   struct cell *top = ci;
   while (top->parent != NULL) top = top->parent;
 
-  /* Flag that contributions will be received */
+  struct gravity_tensors *const multi_i = ci->multipole;
 
+  /* Recover the top-level multipole (for distance checks) */
   struct gravity_tensors *const multi_top = top->multipole;
   const double CoM_rebuild_top[3] = {multi_top->CoM_rebuild[0],
@@ -888,20 +888,6 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
         break;
       }
 
-      /* Should we replace it with an M-M task? */
-      if (cell_can_use_pair_mm_rebuild(ci, cj, e, sp)) {
-        t->type = task_type_grav_mm;
-        t->subtype = task_subtype_none;
-
-        /* Since this task will not be split, we can already link it */
-        atomic_inc(&ci->nr_mm_tasks);
-        atomic_inc(&cj->nr_mm_tasks);
-        engine_addlink(e, &ci->grav_mm, t);
-        engine_addlink(e, &cj->grav_mm, t);
-        break;
-      }
-
       /* Should this task be split-up? */
       if (cell_can_split_pair_gravity_task(ci) &&
           cell_can_split_pair_gravity_task(cj)) {
@@ -910,41 +896,58 @@ static void scheduler_splittask_gravity(struct task *t, struct scheduler *s) {
         const long long gcount_j = cj->gcount;
 
         /* Replace by a single sub-task? */
-        if (scheduler_dosub && /* Use division to avoid integer overflow. */
+        if (scheduler_dosub &&
             gcount_i * gcount_j < ((long long)space_subsize_pair_grav)) {
 
           /* Otherwise, split it. */
         } else {
 
           /* Take a step back (we're going to recycle the current task)... */
           redo = 1;
 
-          /* Find the first non-empty children of the cells */
-          int first_ci_child = 0, first_cj_child = 0;
-          while (ci->progeny[first_ci_child] == NULL) first_ci_child++;
-          while (cj->progeny[first_cj_child] == NULL) first_cj_child++;
-
-          /* Recycle the current pair */
-          t->ci = ci->progeny[first_ci_child];
-          t->cj = cj->progeny[first_cj_child];
+          /* Turn the task into a M-M task that will take care of all the
+           * progeny pairs */
+          t->type = task_type_grav_mm;
+          t->subtype = task_subtype_none;
+          t->flags = 0;
 
           /* Make a task for every other pair of progeny */
-          for (int i = first_ci_child; i < 8; i++) {
+          for (int i = 0; i < 8; i++) {
             if (ci->progeny[i] != NULL) {
-              for (int j = first_cj_child; j < 8; j++) {
+              for (int j = 0; j < 8; j++) {
                 if (cj->progeny[j] != NULL) {
 
-                  /* Skip the recycled pair */
-                  if (i == first_ci_child && j == first_cj_child) continue;
-
-                  scheduler_splittask_gravity(
-                      scheduler_addtask(s, task_type_pair, t->subtype, 0, 0,
-                                        ci->progeny[i], cj->progeny[j]),
-                      s);
+                  /* Can we use a M-M interaction here? */
+                  if (cell_can_use_pair_mm_rebuild(ci->progeny[i],
+                                                   cj->progeny[j], e, sp)) {
+
+                    /* Flag this pair as being treated by the M-M task.
+                     * We use the 64 bits in the task->flags field to store
+                     * this information. The corresponding task will unpack
+                     * the information and operate according to the choices
+                     * made here. */
+                    const int flag = i * 8 + j;
+                    t->flags |= (1LL << flag);
+
+                  } else {
+
+                    /* Ok, we actually have to create a task */
+                    scheduler_splittask_gravity(
+                        scheduler_addtask(s, task_type_pair, task_subtype_grav,
+                                          0, 0, ci->progeny[i], cj->progeny[j]),
+                        s);
+                  }
                }
              }
            }
          }
 
+          /* Can none of the progenies use M-M calculations? */
+          if (t->flags == 0) {
+            t->type = task_type_none;
+            t->subtype = task_subtype_none;
+            t->ci = NULL;
+            t->cj = NULL;
+            t->skip = 1;
+          }
+
         } /* Split the pair */
       }
     } /* pair interaction? */
@@ -1309,15 +1312,15 @@ void scheduler_reweight(struct scheduler *s, int verbose) {
       case task_type_sub_pair:
         if (t->ci->nodeID != nodeID || t->cj->nodeID != nodeID) {
-          if (t->flags < 0)
-            cost = 3.f * (wscale * count_i) * count_j;
-          else
-            cost = 3.f * (wscale * count_i) * count_j * sid_scale[t->flags];
+#ifdef SWIFT_DEBUG_CHECKS
+          if (t->flags < 0) error("Negative flag value!");
+#endif
+          cost = 3.f * (wscale * count_i) * count_j * sid_scale[t->flags];
         } else {
-          if (t->flags < 0)
-            cost = 2.f * (wscale * count_i) * count_j;
-          else
-            cost = 2.f * (wscale * count_i) * count_j * sid_scale[t->flags];
+#ifdef SWIFT_DEBUG_CHECKS
+          if (t->flags < 0) error("Negative flag value!");
+#endif
+          cost = 2.f * (wscale * count_i) * count_j * sid_scale[t->flags];
         }
         break;
@@ -267,10 +267,17 @@ void space_regrid(struct space *s, int verbose) {
   // tic = getticks();
   float h_max = s->cell_min / kernel_gamma / space_stretch;
   if (nr_parts > 0) {
-    if (s->cells_top != NULL) {
+    if (s->local_cells_top != NULL) {
+      for (int k = 0; k < s->nr_local_cells; ++k) {
+        const struct cell *c = &s->cells_top[s->local_cells_top[k]];
+        if (c->h_max > h_max) {
+          h_max = c->h_max;
+        }
+      }
+    } else if (s->cells_top != NULL) {
       for (int k = 0; k < s->nr_cells; k++) {
-        if (s->cells_top[k].nodeID == engine_rank &&
-            s->cells_top[k].h_max > h_max) {
+        const struct cell *c = &s->cells_top[k];
+        if (c->nodeID == engine_rank && c->h_max > h_max) {
           h_max = s->cells_top[k].h_max;
         }
       }
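`local_cells_top` holds the indices (into `cells_top`) of the top-level cells owned by this rank, so the new branch avoids scanning all `nr_cells` and filtering on `nodeID`. One subtlety worth spelling out: inside that loop `k` indexes the list of local cell IDs, not `cells_top` itself, so the cell must always be reached through the indirection:

```c
/* Sketch: correct use of the indirection. Reading s->cells_top[k] inside
 * this loop would silently pick an unrelated cell, since k only indexes
 * the list of local cell IDs, not the top-level cell array. */
for (int k = 0; k < s->nr_local_cells; ++k) {
  const struct cell *c = &s->cells_top[s->local_cells_top[k]];
  if (c->h_max > h_max) h_max = c->h_max;
}
```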
@@ -366,7 +366,7 @@ int task_lock(struct task *t) {
       char buff[MPI_MAX_ERROR_STRING];
       int len;
       MPI_Error_string(err, buff, &len);
-      error("Failed to test request on send/recv task (tag=%i, %s).",
+      error("Failed to test request on send/recv task (tag=%lld, %s).",
             t->flags, buff);
     }
     return res;
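This format fix, like the `%lli` change in the task dump at the top of the diff, follows from `task->flags` being a 64-bit `long long` (it doubles as the M-M progeny-pair bit-mask), so printf-style formats must match the width:

```c
#include <stdio.h>

int main(void) {
  long long flags = 1LL << 42; /* e.g. progeny pair (5, 2) set */
  /* %i on a long long argument is undefined behaviour; use %lld / %lli
   * or cast the argument explicitly. */
  printf("task flags = %lld\n", flags);
  return 0;
}
```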