diff --git a/src/cell.c b/src/cell.c
index 8f824811603ba1a146584aa0f1775556f9961cef..46ee8e29db5c562b08ecf01664d2ef62d5737262 100644
--- a/src/cell.c
+++ b/src/cell.c
@@ -337,7 +337,8 @@ int cell_unpack_end_step(struct cell *restrict c,
 }
 
 /**
- * @brief Pack the multipole information of the given cell and all it's sub-cells.
+ * @brief Pack the multipole information of the given cell and all its
+ * sub-cells.
  *
  * @param c The #cell.
  * @param pcells (output) The multipole information we pack into
@@ -345,7 +346,7 @@ int cell_unpack_end_step(struct cell *restrict c,
  * @return The number of packed cells.
  */
 int cell_pack_multipoles(struct cell *restrict c,
-                        struct gravity_tensors *restrict pcells) {
+                         struct gravity_tensors *restrict pcells) {
 
 #ifdef WITH_MPI
 
@@ -377,7 +378,7 @@ int cell_pack_multipoles(struct cell *restrict c,
  * @return The number of cells created.
  */
 int cell_unpack_multipoles(struct cell *restrict c,
-                          struct gravity_tensors *restrict pcells) {
+                           struct gravity_tensors *restrict pcells) {
 
 #ifdef WITH_MPI
 
@@ -400,7 +401,6 @@ int cell_unpack_multipoles(struct cell *restrict c,
 #endif
 }
 
-
 /**
  * @brief Lock a cell for access to its array of #part and hold its parents.
  *
@@ -2077,7 +2077,7 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         cell_activate_subcell_grav_tasks(t->ci, NULL, s);
       } else if (t->type == task_type_pair) {
         cell_activate_subcell_grav_tasks(t->ci, t->cj, s);
-
+
 #ifdef WITH_MPI
       /* Activate the send/recv tasks. */
       if (ci->nodeID != engine_rank) {
@@ -2085,7 +2085,7 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         /* If the local cell is active, receive data from the foreign cell. */
         if (cj_active) {
           scheduler_activate(s, ci->recv_grav);
-        } 
+        }
 
         /* If the foreign cell is active, we want its ti_end values. */
         if (ci_active) scheduler_activate(s, ci->recv_ti);
@@ -2093,42 +2093,41 @@ int cell_unskip_tasks(struct cell *c, struct scheduler *s) {
         /* Is the foreign cell active and will need stuff from us? */
         if (ci_active) {
 
-          scheduler_activate_send(s, cj->send_grav, ci->nodeID); 
+          scheduler_activate_send(s, cj->send_grav, ci->nodeID);
 
           /* Drift the cell which will be sent at the level at which it is
              sent, i.e. drift the cell specified in the send task (l->t)
              itself. */
           cell_activate_drift_gpart(cj, s);
-        } 
+        }
 
         /* If the local cell is active, send its ti_end values. */
         if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
 
-      } else if (cj->nodeID != engine_rank) { 
+      } else if (cj->nodeID != engine_rank) {
 
         /* If the local cell is active, receive data from the foreign cell. */
         if (ci_active) {
           scheduler_activate(s, cj->recv_grav);
-        } 
+        }
 
         /* If the foreign cell is active, we want its ti_end values. */
         if (cj_active) scheduler_activate(s, cj->recv_ti);
 
         /* Is the foreign cell active and will need stuff from us? */
         if (cj_active) {
-
-          scheduler_activate_send(s, ci->send_grav, cj->nodeID); 
+          scheduler_activate_send(s, ci->send_grav, cj->nodeID);
 
           /* Drift the cell which will be sent at the level at which it is
              sent, i.e. drift the cell specified in the send task (l->t)
              itself. */
           cell_activate_drift_gpart(ci, s);
-        } 
+        }
 
         /* If the local cell is active, send its ti_end values. */
         if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
-      } 
+      }
 #endif
       }
     }
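cell_pack_multipoles() and cell_unpack_multipoles() walk the cell hierarchy depth-first, copying one struct gravity_tensors per cell into (or out of) a flat buffer and returning how many cells were visited, which is what pcell_size counts elsewhere in this patch. A hedged sketch of that traversal contract, not the verbatim SWIFT implementation:

/* Sketch: depth-first pack of one multipole per cell into a flat array. */
static int pack_multipoles_sketch(const struct cell *c,
                                  struct gravity_tensors *buf) {
  buf[0] = *c->multipole; /* the cell's own multipole goes first */
  int count = 1;
  for (int k = 0; k < 8; k++) /* then each existing progeny, recursively */
    if (c->progeny[k] != NULL)
      count += pack_multipoles_sketch(c->progeny[k], &buf[count]);
  return count; /* accumulated into pcell_size by the caller */
}

Unpacking is the mirror image: the same recursion order on the receiving side guarantees that entry i of the buffer lands in the same cell it was read from.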
diff --git a/src/cell.h b/src/cell.h
index 32afb9b3fd240bcb4ca9adc549119360f8e484d4..74d408307109d73ddc5e5c15df13616e27f3a099 100644
--- a/src/cell.h
+++ b/src/cell.h
@@ -248,7 +248,7 @@ struct cell {
   struct task *recv_grav;
 
   /* Task receiving multipole data. */
-  //struct task *recv_multipole;
+  // struct task *recv_multipole;
 
   /* Task receiving data (time-step). */
   struct task *recv_ti;
@@ -266,7 +266,7 @@ struct cell {
   struct link *send_grav;
 
   /* Linked list for sending multipole data. */
-  //struct link *send_multipole;
+  // struct link *send_multipole;
 
   /* Linked list for sending data (time-step). */
   struct link *send_ti;
diff --git a/src/engine.c b/src/engine.c
index d49d5b9fe0d428c42d87c364a9f5c14ba252fb1f..4bce99f25ea4a4893620c2da168e6d727ac71cdb 100644
--- a/src/engine.c
+++ b/src/engine.c
@@ -222,7 +222,7 @@ void engine_make_hierarchical_tasks(struct engine *e, struct cell *c) {
       c->grav_down = scheduler_addtask(s, task_type_grav_down,
                                        task_subtype_none, 0, 0, c, NULL);
 
-      if(periodic) scheduler_addunlock(s, c->init_grav, c->grav_ghost[0]);
+      if (periodic) scheduler_addunlock(s, c->init_grav, c->grav_ghost[0]);
       scheduler_addunlock(s, c->init_grav, c->grav_long_range);
       scheduler_addunlock(s, c->grav_long_range, c->grav_down);
       scheduler_addunlock(s, c->grav_down, c->kick2);
@@ -1071,9 +1071,10 @@ void engine_addtasks_grav(struct engine *e, struct cell *c, struct task *up,
  * @param t_gradient The send_gradient #task, if already created.
  * @param t_ti The send_ti #task, if required and has already been created.
  */
-void engine_addtasks_send_hydro(struct engine *e, struct cell *ci, struct cell *cj,
-                          struct task *t_xv, struct task *t_rho,
-                          struct task *t_gradient, struct task *t_ti) {
+void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
+                                struct cell *cj, struct task *t_xv,
+                                struct task *t_rho, struct task *t_gradient,
+                                struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct link *l = NULL;
@@ -1092,8 +1093,8 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *
 
     /* Create the tasks and their dependencies? */
     if (t_xv == NULL) {
-      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv, 6 * ci->tag + 0,
-                               0, ci, cj);
+      t_xv = scheduler_addtask(s, task_type_send, task_subtype_xv,
+                               6 * ci->tag + 0, 0, ci, cj);
       t_rho = scheduler_addtask(s, task_type_send, task_subtype_rho,
                                 6 * ci->tag + 1, 0, ci, cj);
       t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
@@ -1150,8 +1151,8 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho, t_gradient,
-                                   t_ti);
+        engine_addtasks_send_hydro(e, ci->progeny[k], cj, t_xv, t_rho,
+                                   t_gradient, t_ti);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -1167,8 +1168,9 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *
  * @param t_grav The send_grav #task, if it has already been created.
  * @param t_multi The send_multi #task, if it has already been created.
  */
-void engine_addtasks_send_gravity(struct engine *e, struct cell *ci, struct cell *cj,
-                          struct task *t_grav, struct task *t_multi, struct task *t_ti) {
+void engine_addtasks_send_gravity(struct engine *e, struct cell *ci,
+                                  struct cell *cj, struct task *t_grav,
+                                  struct task *t_multi, struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct link *l = NULL;
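A note on the tag arithmetic visible in these scheduler_addtask() calls: every exchanged cell owns a unique tag, and each communication subtype occupies a fixed slot in a block of six MPI tags — 6 * tag + 0 (xv), + 1 (rho), + 2 (tend), + 3 (gradient) and + 4 (gpart). Both ranks compute the same value, so matching sends and receives pair up without negotiation. As a sketch (the enum names are illustrative, not SWIFT identifiers):

/* Sketch: per-cell MPI tag multiplexing, six consecutive tags per cell. */
enum payload { XV = 0, RHO = 1, TEND = 2, GRADIENT = 3, GPART = 4 };

static int mpi_tag_for(int cell_tag, enum payload p) {
  return 6 * cell_tag + (int)p;
}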
@@ -1187,8 +1189,8 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *
 
     /* Create the tasks and their dependencies? */
     if (t_grav == NULL) {
-      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart, 6 * ci->tag + 4,
-                               0, ci, cj);
+      t_grav = scheduler_addtask(s, task_type_send, task_subtype_gpart,
+                                 6 * ci->tag + 4, 0, ci, cj);
 
       t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend,
                                6 * ci->tag + 2, 0, ci, cj);
@@ -1212,7 +1214,8 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *
   if (ci->split)
     for (int k = 0; k < 8; k++)
       if (ci->progeny[k] != NULL)
-        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi, t_ti);
+        engine_addtasks_send_gravity(e, ci->progeny[k], cj, t_grav, t_multi,
+                                     t_ti);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -1229,19 +1232,19 @@ void engine_addtasks_send_gravity(struct engine *e, struct cell *
  * @param t_gradient The recv_gradient #task, if it has already been created.
  * @param t_ti The recv_ti #task, if required and has already been created.
  */
-void engine_addtasks_recv_hydro(struct engine *e, struct cell *c, struct task *t_xv,
-                          struct task *t_rho, struct task *t_gradient,
-                          struct task *t_ti) {
+void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
+                                struct task *t_xv, struct task *t_rho,
+                                struct task *t_gradient, struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
 
   /* Have we reached a level where there are any hydro tasks ? */
   if (t_xv == NULL && c->density != NULL) {
-    
+
     /* Create the tasks. */
-    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, 6 * c->tag + 0, 0,
-                             c, NULL);
+    t_xv = scheduler_addtask(s, task_type_recv, task_subtype_xv, 6 * c->tag + 0,
+                             0, c, NULL);
     t_rho = scheduler_addtask(s, task_type_recv, task_subtype_rho,
                               6 * c->tag + 1, 0, c, NULL);
     t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend,
@@ -1251,13 +1254,13 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *
                                    6 * c->tag + 3, 0, c, NULL);
 #endif
   }
-  
+
   c->recv_xv = t_xv;
   c->recv_rho = t_rho;
   c->recv_gradient = t_gradient;
   c->recv_ti = t_ti;
-  
-  /* Add dependencies. */
+
+/* Add dependencies. */
 #ifdef EXTRA_HYDRO_LOOP
   for (struct link *l = c->density; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_xv, l->t);
@@ -1288,7 +1291,8 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *
   if (c->split)
     for (int k = 0; k < 8; k++)
      if (c->progeny[k] != NULL)
-        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient, t_ti);
+        engine_addtasks_recv_hydro(e, c->progeny[k], t_xv, t_rho, t_gradient,
+                                   t_ti);
 
 #else
   error("SWIFT was not compiled with MPI support.");
@@ -1303,17 +1307,19 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *
  * @param t_grav The recv_gpart #task, if it has already been created.
  * @param t_multi The recv_multipole #task, if it has already been created.
  */
-void engine_addtasks_recv_gravity(struct engine *e, struct cell *c, struct task *t_grav, struct task *t_multi, struct task *t_ti) {
+void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
+                                  struct task *t_grav, struct task *t_multi,
+                                  struct task *t_ti) {
 
 #ifdef WITH_MPI
   struct scheduler *s = &e->sched;
 
   /* Have we reached a level where there are any gravity tasks ? */
   if (t_grav == NULL && c->grav != NULL) {
-    
+
     /* Create the tasks. */
-    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart, 6 * c->tag + 4, 0,
-                             c, NULL);
+    t_grav = scheduler_addtask(s, task_type_recv, task_subtype_gpart,
+                               6 * c->tag + 4, 0, c, NULL);
 
     t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend,
                              6 * c->tag + 2, 0, c, NULL);
@@ -1321,7 +1327,7 @@ void engine_addtasks_recv_gravity(struct engine *e, struct cell *
 
   c->recv_grav = t_grav;
   c->recv_ti = t_ti;
-  
+
   for (struct link *l = c->grav; l != NULL; l = l->next) {
     scheduler_addunlock(s, t_grav, l->t);
     scheduler_addunlock(s, l->t, t_ti);
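The two scheduler_addunlock() calls in the loop above wire the ordering for every gravity task l->t that touches this foreign cell. In schematic form:

/* Ordering wired above (scheduler_addunlock(s, a, b) makes b runnable
 * only once a has completed):
 *
 *   recv gpart  -->  every gravity task using this cell  -->  recv tend
 *
 * i.e. foreign particle data is consumed only after it has arrived, and the
 * foreign time-step information is pulled in only after all interactions
 * involving the cell have run. */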
@@ -1338,7 +1344,6 @@ void engine_addtasks_recv_gravity(struct engine *e, struct cell *
 #endif
 }
 
-
 /**
  * @brief Exchange cell structures with other nodes.
  *
@@ -1826,65 +1831,66 @@ void engine_exchange_strays(struct engine *e, size_t offset_parts,
  * @param e The #engine.
  */
 void engine_exchange_top_multipoles(struct engine *e) {
-  
+
 #ifdef WITH_MPI
 
 #ifdef SWIFT_DEBUG_CHECKS
-  for(int i=0; i<e->s->nr_cells; ++i){
-    const struct gravity_tensors *m = &e->s->multipoles_top[i];
-    if(e->s->cells_top[i].nodeID == engine_rank) {
-      if(m->m_pole.M_000 > 0.) {
-        if(m->CoM[0] < 0. || m->CoM[0] > e->s->dim[0])
-          error("Invalid multipole position in X");
-        if(m->CoM[1] < 0. || m->CoM[1] > e->s->dim[1])
-          error("Invalid multipole position in Y");
-        if(m->CoM[2] < 0. || m->CoM[2] > e->s->dim[2])
-          error("Invalid multipole position in Z");
+  for (int i = 0; i < e->s->nr_cells; ++i) {
+    const struct gravity_tensors *m = &e->s->multipoles_top[i];
+    if (e->s->cells_top[i].nodeID == engine_rank) {
+      if (m->m_pole.M_000 > 0.) {
+        if (m->CoM[0] < 0. || m->CoM[0] > e->s->dim[0])
+          error("Invalid multipole position in X");
+        if (m->CoM[1] < 0. || m->CoM[1] > e->s->dim[1])
+          error("Invalid multipole position in Y");
+        if (m->CoM[2] < 0. || m->CoM[2] > e->s->dim[2])
+          error("Invalid multipole position in Z");
       }
     } else {
-      if(m->m_pole.M_000 != 0.) error("Non-zero mass for foreign m-pole");
-      if(m->CoM[0] != 0.) error("Non-zero position in X for foreign m-pole");
-      if(m->CoM[1] != 0.) error("Non-zero position in Y for foreign m-pole");
-      if(m->CoM[2] != 0.) error("Non-zero position in Z for foreign m-pole");
-      if(m->m_pole.num_gpart != 0) error("Non-zero gpart count in foreign m-pole");
+      if (m->m_pole.M_000 != 0.) error("Non-zero mass for foreign m-pole");
+      if (m->CoM[0] != 0.) error("Non-zero position in X for foreign m-pole");
+      if (m->CoM[1] != 0.) error("Non-zero position in Y for foreign m-pole");
+      if (m->CoM[2] != 0.) error("Non-zero position in Z for foreign m-pole");
+      if (m->m_pole.num_gpart != 0)
+        error("Non-zero gpart count in foreign m-pole");
     }
   }
 #endif
 
-  /* Each node (space) has constructed its own top-level multipoles. 
+  /* Each node (space) has constructed its own top-level multipoles.
    * We now need to make sure every other node has a copy of everything.
    *
    * WARNING: Adult stuff ahead: don't do this at home!
    *
-   * Since all nodes have their top-level multi-poles computed 
+   * Since all nodes have their top-level multi-poles computed
    * and all foreign ones set to 0 (all bytes), we can gather all the m-poles
-   * by doing a bit-wise OR reduction across all the nodes directly in 
-   * place inside the multi-poles_top array. 
+   * by doing a bit-wise OR reduction across all the nodes directly in
+   * place inside the multipoles_top array.
+   * This only works if the foreign m-poles on every node are zeroed and no
    * multi-pole is present on more than one node (two things guaranteed by the
    * domain decomposition). */
   MPI_Allreduce(MPI_IN_PLACE, e->s->multipoles_top,
-                e->s->nr_cells*sizeof(struct gravity_tensors), MPI_BYTE,
-                MPI_BOR, MPI_COMM_WORLD);
-  
+                e->s->nr_cells * sizeof(struct gravity_tensors), MPI_BYTE,
+                MPI_BOR, MPI_COMM_WORLD);
+
 #ifdef SWIFT_DEBUG_CHECKS
   long long counter = 0;
 
   /* Let's check that what we received makes sense */
-  for(int i=0; i<e->s->nr_cells; ++i){
-    const struct gravity_tensors *m = &e->s->multipoles_top[i];
+  for (int i = 0; i < e->s->nr_cells; ++i) {
+    const struct gravity_tensors *m = &e->s->multipoles_top[i];
     counter += m->m_pole.num_gpart;
-    if(m->m_pole.M_000 > 0.) {
-      if(m->CoM[0] < 0. || m->CoM[0] > e->s->dim[0])
-        error("Invalid multipole position in X");
-      if(m->CoM[1] < 0. || m->CoM[1] > e->s->dim[1])
-        error("Invalid multipole position in Y");
-      if(m->CoM[2] < 0. || m->CoM[2] > e->s->dim[2])
-        error("Invalid multipole position in Z");
+    if (m->m_pole.M_000 > 0.) {
+      if (m->CoM[0] < 0. || m->CoM[0] > e->s->dim[0])
+        error("Invalid multipole position in X");
+      if (m->CoM[1] < 0. || m->CoM[1] > e->s->dim[1])
+        error("Invalid multipole position in Y");
+      if (m->CoM[2] < 0. || m->CoM[2] > e->s->dim[2])
+        error("Invalid multipole position in Z");
     }
   }
-  if(counter != e->total_nr_gparts)
+  if (counter != e->total_nr_gparts)
    error("Total particles in multipoles inconsistent with engine");
 #endif
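The trick documented in that comment deserves a standalone illustration: because every array element is wholly owned by exactly one rank and is all-zero bytes everywhere else, a byte-wise OR is a lossless merge, whatever the payload type. A self-contained sketch (names and the payload type are mine, not SWIFT's):

#include <mpi.h>

/* Merge an array whose elements are each non-zero on exactly one rank.
 * OR-ing any bit pattern with all-zero bytes returns it unchanged, so the
 * owner's bytes survive on every rank; no custom datatype/op is needed. */
static void merge_disjoint(void *array, int n_elements, int element_size) {
  MPI_Allreduce(MPI_IN_PLACE, array, n_elements * element_size, MPI_BYTE,
                MPI_BOR, MPI_COMM_WORLD);
}

The failure mode is exactly the one the comment warns about: if two ranks both hold non-zero bytes for the same element, the OR silently produces garbage rather than an error.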
@@ -1894,7 +1900,7 @@ void engine_exchange_top_multipoles(struct engine *e) {
 }
 
 void engine_exchange_proxy_multipoles(struct engine *e) {
-  
+
 #ifdef WITH_MPI
 
   const ticks tic = getticks();
@@ -1907,10 +1913,10 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
   /* Loop over the proxies. */
   for (int pid = 0; pid < e->nr_proxies; pid++) {
-    
+
     /* Get a handle on the proxy. */
     const struct proxy *p = &e->proxies[pid];
-    
+
     /* Now collect the number of requests associated */
     count_recv_requests += p->nr_cells_in;
     count_send_requests += p->nr_cells_out;
@@ -1918,22 +1924,23 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
     /* And the actual number of things we are going to ship */
     for (int k = 0; k < p->nr_cells_in; k++)
       count_recv += p->cells_in[k]->pcell_size;
-    
+
     for (int k = 0; k < p->nr_cells_out; k++)
-      count_send += p->cells_out[k]->pcell_size;      
+      count_send += p->cells_out[k]->pcell_size;
   }
 
-  /* Allocate the buffers for the packed data */ 
-  struct gravity_tensors *buffer_send = malloc(sizeof(struct gravity_tensors) * count_send);
-  struct gravity_tensors *buffer_recv = malloc(sizeof(struct gravity_tensors) * count_recv);
-  if(buffer_send == NULL || buffer_recv == NULL)
+  /* Allocate the buffers for the packed data */
+  struct gravity_tensors *buffer_send =
+      malloc(sizeof(struct gravity_tensors) * count_send);
+  struct gravity_tensors *buffer_recv =
+      malloc(sizeof(struct gravity_tensors) * count_recv);
+  if (buffer_send == NULL || buffer_recv == NULL)
     error("Unable to allocate memory for multipole transactions");
 
   /* Also allocate the MPI requests */
   const int count_requests = count_send_requests + count_recv_requests;
   MPI_Request *requests = malloc(sizeof(MPI_Request) * count_requests);
-  if(requests == NULL)
-    error("Unable to allocate memory for MPI requests");
+  if (requests == NULL) error("Unable to allocate memory for MPI requests");
 
   int this_request = 0;
   int this_recv = 0;
@@ -1945,13 +1952,15 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
 
     /* Get a handle on the proxy. */
     const struct proxy *p = &e->proxies[pid];
 
-    for (int k = 0; k < p->nr_cells_in; k++) { 
+    for (int k = 0; k < p->nr_cells_in; k++) {
 
       const int num_elements = p->cells_in[k]->pcell_size;
 
       /* Receive everything */
-      MPI_Irecv(&buffer_recv[this_recv], num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
-                p->cells_in[k]->nodeID, p->cells_in[k]->tag, MPI_COMM_WORLD, &requests[this_request]);
+      MPI_Irecv(&buffer_recv[this_recv],
+                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
+                p->cells_in[k]->nodeID, p->cells_in[k]->tag, MPI_COMM_WORLD,
+                &requests[this_request]);
 
       /* Move to the next slot in the buffers */
       this_recv += num_elements;
@@ -1959,58 +1968,63 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
       this_request++;
     }
 
     /* Loop over the proxies to issue the sends. */
-    for (int k = 0; k < p->nr_cells_out; k++) { 
-      
+    for (int k = 0; k < p->nr_cells_out; k++) {
+
       /* Number of multipoles in this cell hierarchy */
       const int num_elements = p->cells_out[k]->pcell_size;
-      
+
       /* Let's pack everything recursively */
       cell_pack_multipoles(p->cells_out[k], &buffer_send[this_send]);
-
-      /* Send everything (note the use of cells_in[0] to get the correct node ID. */
-      MPI_Isend(&buffer_send[this_send], num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
-                p->cells_in[0]->nodeID, p->cells_out[k]->tag, MPI_COMM_WORLD, &requests[this_request]);
-
+
+      /* Send everything (note the use of cells_in[0] to get the correct node
+       * ID). */
+      MPI_Isend(&buffer_send[this_send],
+                num_elements * sizeof(struct gravity_tensors), MPI_BYTE,
+                p->cells_in[0]->nodeID, p->cells_out[k]->tag, MPI_COMM_WORLD,
+                &requests[this_request]);
+
       /* Move to the next slot in the buffers */
       this_send += num_elements;
       this_request++;
-    }    
+    }
   }
 
   /* Wait for all the requests to arrive home */
   MPI_Status *stats = malloc(count_requests * sizeof(MPI_Status));
   int res;
-  if((res = MPI_Waitall(count_requests, requests, stats)) != MPI_SUCCESS) {
-    for(int k = 0; k < count_requests; ++k) {
-      char buff[MPI_MAX_ERROR_STRING];
-      MPI_Error_string(stats[k].MPI_ERROR, buff, &res);
-      message("request from source %i, tag %i has error '%s'.",
-              stats[k].MPI_SOURCE, stats[k].MPI_TAG, buff);
+  if ((res = MPI_Waitall(count_requests, requests, stats)) != MPI_SUCCESS) {
+    for (int k = 0; k < count_requests; ++k) {
+      char buff[MPI_MAX_ERROR_STRING];
+      MPI_Error_string(stats[k].MPI_ERROR, buff, &res);
+      message("request from source %i, tag %i has error '%s'.",
+              stats[k].MPI_SOURCE, stats[k].MPI_TAG, buff);
     }
     error("Failed during waitall for multipole data.");
   }
 
   /* Let's now unpack the multipoles at the right place */
   this_recv = 0;
-  for (int pid = 0; pid < e->nr_proxies; pid++) { 
+  for (int pid = 0; pid < e->nr_proxies; pid++) {
 
     /* Get a handle on the proxy. */
     const struct proxy *p = &e->proxies[pid];
 
-    for (int k = 0; k < p->nr_cells_in; k++) { 
+    for (int k = 0; k < p->nr_cells_in; k++) {
 
       const int num_elements = p->cells_in[k]->pcell_size;
 
 #ifdef SWIFT_DEBUG_CHECKS
-      /* Check that the first element (top-level cell's multipole) matches what we received */
-      if(p->cells_in[k]->multipole->m_pole.num_gpart != buffer_recv[this_recv].m_pole.num_gpart)
-        error("Current: M_000=%e num_gpart=%lld\n New: M_000=%e num_gpart=%lld",
-              p->cells_in[k]->multipole->m_pole.M_000,
-              p->cells_in[k]->multipole->m_pole.num_gpart,
-              buffer_recv[this_recv].m_pole.M_000,
-              buffer_recv[this_recv].m_pole.num_gpart);
-#endif      
+      /* Check that the first element (top-level cell's multipole) matches what
+       * we received */
+      if (p->cells_in[k]->multipole->m_pole.num_gpart !=
+          buffer_recv[this_recv].m_pole.num_gpart)
+        error("Current: M_000=%e num_gpart=%lld\n New: M_000=%e num_gpart=%lld",
+              p->cells_in[k]->multipole->m_pole.M_000,
+              p->cells_in[k]->multipole->m_pole.num_gpart,
+              buffer_recv[this_recv].m_pole.M_000,
+              buffer_recv[this_recv].m_pole.num_gpart);
+#endif
 
       /* Unpack recursively */
       cell_unpack_multipoles(p->cells_in[k], &buffer_recv[this_recv]);
@@ -2020,7 +2034,6 @@ void engine_exchange_proxy_multipoles(struct engine *e) {
     }
   }
 
-
   /* Free everything */
   free(stats);
   free(buffer_send);
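The exchange above follows the classic nonblocking pattern: post every receive first, then every send, then a single MPI_Waitall. A stripped-down sketch of the same shape, with invented names and a raw byte payload standing in for the packed multipoles:

#include <mpi.h>
#include <stdlib.h>

/* Sketch: post all receives, then all sends, then wait once.
 * Sizes are in bytes; tags must match the remote side's scheme. */
static void exchange_sketch(char *recv_buf, const int *recv_bytes,
                            const int *recv_rank, const int *recv_tag,
                            int n_in, const char *send_buf,
                            const int *send_bytes, const int *send_rank,
                            const int *send_tag, int n_out) {
  MPI_Request *req = malloc((n_in + n_out) * sizeof(MPI_Request));
  int r = 0;

  /* Receives first, so matching sends can complete as soon as they land. */
  for (int k = 0, off = 0; k < n_in; off += recv_bytes[k], k++)
    MPI_Irecv(recv_buf + off, recv_bytes[k], MPI_BYTE, recv_rank[k],
              recv_tag[k], MPI_COMM_WORLD, &req[r++]);

  for (int k = 0, off = 0; k < n_out; off += send_bytes[k], k++)
    MPI_Isend((void *)(send_buf + off), send_bytes[k], MPI_BYTE, send_rank[k],
              send_tag[k], MPI_COMM_WORLD, &req[r++]);

  MPI_Waitall(n_in + n_out, req, MPI_STATUSES_IGNORE);
  free(req);
}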
@@ -2104,7 +2117,6 @@ void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
     /* const int cjd = cell_getid(cdim, ii, jj, kk); */
     /* struct cell *cj = &cells[cjd]; */
 
-
     /* Now loop over all the neighbours of this cell */
     for (int ii = -1; ii < 2; ii++) {
       int iii = i + ii;
@@ -2123,15 +2135,16 @@ void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
           const int cjd = cell_getid(cdim, iii, jjj, kkk);
           struct cell *cj = &cells[cjd];
 
-          /* if(i==11 && j==0 && k==10) */
-          /*   message("Found direct neighbour: (i,j,k)=(%d,%d,%d) (iii,jjj,kkk)=(%d,%d,%d) nodeID=%d", i,j,k, iii,jjj,kkk, cj->nodeID); */
-          
+          /* if(i==11 && j==0 && k==10) */
+          /* message("Found direct neighbour: (i,j,k)=(%d,%d,%d)
+           * (iii,jjj,kkk)=(%d,%d,%d) nodeID=%d", i,j,k, iii,jjj,kkk,
+           * cj->nodeID); */
 
-          /* Avoid duplicates of local pairs*/
-          if(cid <= cjd && cj->nodeID == nodeID) continue;
+          /* Avoid duplicates of local pairs*/
+          if (cid <= cjd && cj->nodeID == nodeID) continue;
 
-          /* Skip cells without gravity particles */
-          if (cj->gcount == 0) continue;
+          /* Skip cells without gravity particles */
+          if (cj->gcount == 0) continue;
 
           /* Recover the multipole information */
           const struct gravity_tensors *const multi_j = cj->multipole;
@@ -2150,7 +2163,8 @@ void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
           const double r2 = dx * dx + dy * dy + dz * dz;
 
           /* Are the cells too close for a MM interaction ? */
-          if (1 || !gravity_M2L_accept(multi_i->r_max_rebuild,
+          if (1 ||
+              !gravity_M2L_accept(multi_i->r_max_rebuild,
                                   multi_j->r_max_rebuild, theta_crit2, r2)) {
 
             /* Ok, we need to add a direct pair calculation */
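gravity_M2L_accept() — short-circuited here with `1 ||` so that every direct-neighbour pair gets an explicit pair task, presumably while the gravity tasks are being debugged — is the opening-angle test of the FMM: a pair may interact multipole-to-multipole only when both cells are small compared to their separation. A hedged sketch of what such a test looks like; the real SWIFT routine may differ in detail:

/* Sketch: can the pair interact via multipoles alone?
 * r_max_*: radii enclosing all gparts of each cell;
 * theta_crit2: square of the critical opening angle;
 * r2: squared CoM<->CoM distance. */
static int m2l_accept_sketch(double r_max_i, double r_max_j,
                             double theta_crit2, double r2) {
  const double size = r_max_i + r_max_j; /* conservative pair extent */
  return size * size < theta_crit2 * r2; /* i.e. size / r < theta_crit */
}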
@@ -2167,7 +2181,7 @@ void engine_make_self_gravity_tasks_mapper(void *map_data, int num_elements,
  * @brief Constructs the top-level tasks for the short-range gravity
  * interactions (master function).
  *
- * - Create the FFT task and the array of gravity ghosts. 
+ * - Create the FFT task and the array of gravity ghosts.
  * - Call the mapper function to create the other tasks.
  *
  * @param e The #engine.
@@ -2210,13 +2224,15 @@ void engine_make_self_gravity_tasks(struct engine *e) {
                  s->nr_cells, 1, 0, extra_data);
 
 #ifdef SWIFT_DEBUG_CHECKS
-  if(periodic)
-    for(int i=0; i < s->nr_cells; ++i) {
+  if (periodic)
+    for (int i = 0; i < s->nr_cells; ++i) {
       const struct cell *c = &s->cells_top[i];
-      if(c->nodeID == engine_rank && (c->grav_ghost[0] == NULL || c->grav_ghost[0] == NULL))
-        error("Invalid gravity_ghost for local cell");
-      if(c->nodeID != engine_rank && (c->grav_ghost[0] != NULL || c->grav_ghost[0] != NULL))
-        error("Invalid gravity_ghost for foreign cell");
+      if (c->nodeID == engine_rank &&
+          (c->grav_ghost[0] == NULL || c->grav_ghost[1] == NULL))
+        error("Invalid gravity_ghost for local cell");
+      if (c->nodeID != engine_rank &&
+          (c->grav_ghost[0] != NULL || c->grav_ghost[1] != NULL))
+        error("Invalid gravity_ghost for foreign cell");
     }
 #endif
@@ -2928,24 +2944,25 @@ void engine_maketasks(struct engine *e) {
 
       /* Loop through the proxy's incoming cells and add the
         recv tasks. */
-      if(e->policy & engine_policy_hydro)
-        for (int k = 0; k < p->nr_cells_in; k++)
-          engine_addtasks_recv_hydro(e, p->cells_in[k], NULL, NULL, NULL, NULL);
+      if (e->policy & engine_policy_hydro)
+        for (int k = 0; k < p->nr_cells_in; k++)
+          engine_addtasks_recv_hydro(e, p->cells_in[k], NULL, NULL, NULL, NULL);
 
-      if(e->policy & engine_policy_self_gravity)
-        for (int k = 0; k < p->nr_cells_in; k++)
-          engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL, NULL);
+      if (e->policy & engine_policy_self_gravity)
+        for (int k = 0; k < p->nr_cells_in; k++)
+          engine_addtasks_recv_gravity(e, p->cells_in[k], NULL, NULL, NULL);
 
       /* Loop through the proxy's outgoing cells and add the
         send tasks. */
-      if(e->policy & engine_policy_hydro)
-        for (int k = 0; k < p->nr_cells_out; k++)
-          engine_addtasks_send_hydro(e, p->cells_out[k], p->cells_in[0], NULL, NULL,
-                                     NULL, NULL);
-      
-      if(e->policy & engine_policy_self_gravity)
-        for (int k = 0; k < p->nr_cells_out; k++)
-          engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL, NULL, NULL);
+      if (e->policy & engine_policy_hydro)
+        for (int k = 0; k < p->nr_cells_out; k++)
+          engine_addtasks_send_hydro(e, p->cells_out[k], p->cells_in[0], NULL,
+                                     NULL, NULL, NULL);
+
+      if (e->policy & engine_policy_self_gravity)
+        for (int k = 0; k < p->nr_cells_out; k++)
+          engine_addtasks_send_gravity(e, p->cells_out[k], p->cells_in[0], NULL,
+                                       NULL, NULL);
     }
   }
 #endif
@@ -3156,7 +3173,6 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
 #endif
       }
 
-
       /* Only interested in gravity tasks as of here. */
       if (t->subtype == task_subtype_grav) {
 
@@ -3167,7 +3183,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
          /* If the local cell is active, receive data from the foreign cell. */
          if (cj_active) {
            scheduler_activate(s, ci->recv_grav);
-          } 
+          }
 
          /* If the foreign cell is active, we want its ti_end values. */
          if (ci_active) scheduler_activate(s, ci->recv_ti);
@@ -3182,17 +3198,17 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
                sent, i.e. drift the cell specified in the send task (l->t)
                itself. */
             cell_activate_drift_gpart(l->t->ci, s);
-          } 
+          }
 
           /* If the local cell is active, send its ti_end values. */
           if (cj_active) scheduler_activate_send(s, cj->send_ti, ci->nodeID);
 
-        } else if (cj->nodeID != engine_rank) { 
+        } else if (cj->nodeID != engine_rank) {
 
          /* If the local cell is active, receive data from the foreign cell. */
          if (ci_active) {
            scheduler_activate(s, cj->recv_grav);
-          } 
+          }
 
          /* If the foreign cell is active, we want its ti_end values. */
          if (cj_active) scheduler_activate(s, cj->recv_ti);
@@ -3201,18 +3217,17 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
           if (cj_active) {
 
             struct link *l =
-                scheduler_activate_send(s, ci->send_grav, cj->nodeID); 
-            
+                scheduler_activate_send(s, ci->send_grav, cj->nodeID);
 
             /* Drift the cell which will be sent at the level at which it is
                sent, i.e. drift the cell specified in the send task (l->t)
                itself. */
             cell_activate_drift_gpart(l->t->ci, s);
-          } 
+          }
 
           /* If the local cell is active, send its ti_end values. */
           if (ci_active) scheduler_activate_send(s, ci->send_ti, cj->nodeID);
-        } 
+        }
 #endif
       }
     }
@@ -3443,7 +3458,6 @@ int engine_estimate_nr_tasks(struct engine *e) {
  */
 void engine_rebuild(struct engine *e, int clean_h_values) {
 
-
   const ticks tic = getticks();
 
   /* Clear the forcerebuild flag, whatever it was. */
@@ -3455,14 +3469,14 @@ void engine_rebuild(struct engine *e, int clean_h_values) {
   /* Initial cleaning up session ? */
   if (clean_h_values) space_sanitize(e->s);
 
-  /* If in parallel, exchange the cell structure, top-level and neighbouring multipoles. */
+/* If in parallel, exchange the cell structure, top-level and neighbouring
+ * multipoles. */
 #ifdef WITH_MPI
   engine_exchange_cells(e);
 
-  if(e->policy & engine_policy_self_gravity)
-    engine_exchange_top_multipoles(e);
+  if (e->policy & engine_policy_self_gravity) engine_exchange_top_multipoles(e);
 
-  if(e->policy & engine_policy_self_gravity)
+  if (e->policy & engine_policy_self_gravity)
     engine_exchange_proxy_multipoles(e);
 #endif
 
@@ -3916,7 +3930,8 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs,
       num_gpart_mpole += e->s->cells_top[i].multipole->m_pole.num_gpart;
     if (num_gpart_mpole != e->total_nr_gparts)
       error(
-          "Top-level multipoles don't contain the total number of gpart s->nr_gpart=%zd, "
+          "Top-level multipoles don't contain the total number of gpart "
+          "s->nr_gpart=%zd, "
          "m_poles=%zd",
          e->total_nr_gparts, num_gpart_mpole);
   }
@@ -4682,8 +4697,8 @@ void engine_unpin() {
  */
 void engine_init(struct engine *e, struct space *s,
                  const struct swift_params *params, int nr_nodes, int nodeID,
-                 int nr_threads, long long Ngas, long long Ndm, int with_aff, int policy,
-                 int verbose, struct repartition *reparttype,
+                 int nr_threads, long long Ngas, long long Ndm, int with_aff,
+                 int policy, int verbose, struct repartition *reparttype,
                  const struct unit_system *internal_units,
                  const struct phys_const *physical_constants,
                  const struct hydro_props *hydro,
diff --git a/src/engine.h b/src/engine.h
index df1d047122955291c822d6049f73ae11e4067cc0..09a99e2d71c89ef106777e6f3c2cb127eb9b3604 100644
--- a/src/engine.h
+++ b/src/engine.h
@@ -265,8 +265,8 @@ void engine_print_stats(struct engine *e);
 void engine_dump_snapshot(struct engine *e);
 void engine_init(struct engine *e, struct space *s,
                  const struct swift_params *params, int nr_nodes, int nodeID,
-                 int nr_threads, long long Ngas, long long Ndm, int with_aff, int policy,
-                 int verbose, struct repartition *reparttype,
+                 int nr_threads, long long Ngas, long long Ndm, int with_aff,
+                 int policy, int verbose, struct repartition *reparttype,
                  const struct unit_system *internal_units,
                  const struct phys_const *physical_constants,
                  const struct hydro_props *hydro,
diff --git a/src/gravity_properties.c b/src/gravity_properties.c
index cb3860b5f60a7fd44054505e62e9caf847c4ec35..43a1d0d78f403ba9d2f1202db5e8d7e648ad6a11 100644
--- a/src/gravity_properties.c
+++ b/src/gravity_properties.c
@@ -52,7 +52,7 @@ void gravity_props_init(struct gravity_props *p,
 
   /* Opening angle */
   p->theta_crit = parser_get_param_double(params, "Gravity:theta");
-  //if (p->theta_crit >= 1.) error("Theta too large. FMM won't converge.");
+  // if (p->theta_crit >= 1.) error("Theta too large. FMM won't converge.");
   p->theta_crit2 = p->theta_crit * p->theta_crit;
   p->theta_crit_inv = 1. / p->theta_crit;
 
diff --git a/src/multipole.h b/src/multipole.h
index f7a3a2e7f00e2bed67ff02096f317f7d682e987d..fa76280e1bc95c2525e868b8680d37bad140340c 100644
--- a/src/multipole.h
+++ b/src/multipole.h
@@ -181,19 +181,19 @@ struct gravity_tensors {
 
     /*! Multipole mass */
     struct multipole m_pole;
-    
+
     /*! Field tensor for the potential */
     struct grav_tensor pot;
-    
+
     /*! Centre of mass of the matter dsitribution */
     double CoM[3];
-    
+
    /*! Centre of mass of the matter dsitribution at the last rebuild */
    double CoM_rebuild[3];
-    
+
    /*! Upper limit of the CoM<->gpart distance */
    double r_max;
-    
+
    /*! Upper limit of the CoM<->gpart distance at the last rebuild */
    double r_max_rebuild;
 };
 
@@ -232,7 +232,7 @@ INLINE static void gravity_drift(struct gravity_tensors *m, double dt,
   m->CoM[2] += dz;
 
   /* Conservative change in maximal radius containing all gpart */
-  m->r_max = m->r_max_rebuild + 0.*x_diff;
+  m->r_max = m->r_max_rebuild + 0. * x_diff;
 }
 
 /**
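A note on the drift line above: x_diff bounds how far any gpart can have moved since the last rebuild, so a conservative update would grow the radius by it; the factor 0. pins r_max to its rebuild value instead, which reads like a temporary measure while the gravity tasks are being debugged. A hedged sketch of the update that the comment describes, under that assumption:

/* Hedged sketch: grow the enclosing radius by the maximal possible
 * displacement since the last rebuild, so r_max stays an upper bound. */
static double drift_r_max_sketch(double r_max_rebuild, double x_diff) {
  return r_max_rebuild + x_diff; /* still encloses every drifted gpart */
}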
diff --git a/src/runner.c b/src/runner.c
index 36c65f95ddc7a656e2f8a7a11556235712176fec..58b9b710cc270f27e057ce404b51fc5d7118dc85 100644
--- a/src/runner.c
+++ b/src/runner.c
@@ -553,7 +553,7 @@ void runner_do_init_grav(struct runner *r, struct cell *c, int timer) {
   if (!cell_is_active(c, e)) return;
 
   /* Drift the multipole */
-  //cell_drift_multipole(c, e);
+  // cell_drift_multipole(c, e);
 
   /* Reset the gravity acceleration tensors */
   gravity_field_tensors_init(&c->multipole->pot, e->ti_current);
@@ -1419,15 +1419,14 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
 
   TIMER_TIC;
 
-
 #if (ICHECK != 0)
-  for(int i=0; i < c->gcount; ++i)
-    if(c->gparts[i].id_or_neg_offset == ICHECK) {
-      message("Found gpart"); fflush(stdout);
+  for (int i = 0; i < c->gcount; ++i)
+    if (c->gparts[i].id_or_neg_offset == ICHECK) {
+      message("Found gpart");
+      fflush(stdout);
     }
 #endif
 
-
   /* Anything to do here? */
   if (!cell_is_active(c, e)) return;
 
@@ -1485,15 +1484,15 @@ void runner_do_end_force(struct runner *r, struct cell *c, int timer) {
 
         /* Check that this gpart has interacted with all the other
          * particles (via direct or multipoles) in the box */
-        if (gp->num_interacted != e->total_nr_gparts && gp->id_or_neg_offset == ICHECK)
+        if (gp->num_interacted != e->total_nr_gparts &&
+            gp->id_or_neg_offset == ICHECK)
          error(
              "g-particle (id=%lld, type=%s) did not interact "
              "gravitationally "
              "with all other gparts gp->num_interacted=%lld, "
              "total_gparts=%zd (local num_gparts=%zd)",
              gp->id_or_neg_offset, part_type_names[gp->type],
-              gp->num_interacted, e->total_nr_gparts,
-              e->s->nr_gparts);
+              gp->num_interacted, e->total_nr_gparts, e->s->nr_gparts);
       }
 #endif
 }
@@ -1985,7 +1984,7 @@ void *runner_main(void *data) {
             runner_do_recv_spart(r, ci, 1);
           } else if (t->subtype == task_subtype_multipole) {
             cell_unpack_multipoles(ci, t->buff);
-            free(t->buff);      
+            free(t->buff);
           } else {
             error("Unknown/invalid task subtype (%d).", t->subtype);
           }
diff --git a/src/runner_doiact_grav.h b/src/runner_doiact_grav.h
index 9565ba10008d96486c2b366afc5afbfe87d3993b..26d6b90c745e91554c87e5c476b228f50d14cb38 100644
--- a/src/runner_doiact_grav.h
+++ b/src/runner_doiact_grav.h
@@ -45,10 +45,10 @@ void runner_do_grav_down(struct runner *r, struct cell *c, int timer) {
   const int gcount = c->gcount;
 
 #if (ICHECK != 0)
-  for(int i=0; i < c->gcount; ++i)
-    if(c->gparts[i].id_or_neg_offset == ICHECK)
-      message("Found gpart depth=%d split=%d m->num_interacted=%lld",
-              c->depth, c->split, c->multipole->pot.num_interacted);
+  for (int i = 0; i < c->gcount; ++i)
+    if (c->gparts[i].id_or_neg_offset == ICHECK)
+      message("Found gpart depth=%d split=%d m->num_interacted=%lld", c->depth,
+              c->split, c->multipole->pot.num_interacted);
 #endif
 
   TIMER_TIC;
@@ -136,7 +136,6 @@ void runner_do_grav_down(struct runner *r, struct cell *c, int timer) {
 void runner_dopair_grav_mm(const struct runner *r, struct cell *restrict ci,
                            struct cell *restrict cj) {
 
-
   /* Some constants */
   const struct engine *e = r->e;
   const struct space *s = e->s;
@@ -157,7 +156,8 @@ void runner_dopair_grav_mm(const struct runner *r, struct cell *restrict ci,
 
 #ifdef SWIFT_DEBUG_CHECKS
   if (ci == cj) error("Interacting a cell with itself using M2L");
 
-  if (multi_j->num_gpart == 0) error("Multipole does not seem to have been set.");
+  if (multi_j->num_gpart == 0)
+    error("Multipole does not seem to have been set.");
 
   if (ci->multipole->pot.ti_init != e->ti_current)
     error("ci->grav tensor not initialised.");
@@ -1156,8 +1156,8 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
   int direct_ngbs = 0;
   int direct_ngbs_gpart = 0;
   int other_ngbs_gpart = 0;
-  for(int i=0; i < ci->gcount; ++i)
-    if(ci->gparts[i].id_or_neg_offset == ICHECK) {
+  for (int i = 0; i < ci->gcount; ++i)
+    if (ci->gparts[i].id_or_neg_offset == ICHECK) {
       message("Found gpart");
       check = 1;
     }
@@ -1178,7 +1178,7 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
 
   if (ci->nodeID != engine_rank)
     error("Non-local cell in long-range gravity task!");
-  
+
   /* Check multipole has been drifted */
   if (ci->ti_old_multipole != e->ti_current)
     error("Interacting un-drifted multipole");
 
   /* multi_i->CoM_rebuild[2]}; */
 
   /* Get the cell index. MATTHIEU */
-  const int cid = (ci - cells);// / sizeof(struct cell);
+  const int cid = (ci - cells);  // / sizeof(struct cell);
   const int i = cid / (cdim[1] * cdim[2]);
   const int j = (cid / cdim[2]) % cdim[1];
   const int k = cid % cdim[2];
@@ -1212,7 +1212,7 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
 #endif
 
       // MATTHIEU
-      const int cjd = (cj - cells);// / sizeof(struct cell);
+      const int cjd = (cj - cells);  // / sizeof(struct cell);
       const int ii = cjd / (cdim[1] * cdim[2]);
       const int jj = (cjd / cdim[2]) % cdim[1];
       const int kk = cjd % cdim[2];
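The pointer arithmetic above recovers a top-level cell's integer coordinates from its position in the flattened cells array — the inverse of cell_getid(). As a standalone sketch:

#include <stddef.h>

/* Sketch: invert the row-major linearisation
 * id = (i * cdim[1] + j) * cdim[2] + k used for the top-level grid. */
static void cell_index_to_ijk(ptrdiff_t cid, const int cdim[3],
                              int *i, int *j, int *k) {
  *i = (int)(cid / (cdim[1] * cdim[2]));
  *j = (int)((cid / cdim[2]) % cdim[1]);
  *k = (int)(cid % cdim[2]);
}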
@@ -1235,27 +1235,29 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
 
       /* Are we in charge of this cell pair? MATTHIEU*/
       /* if (gravity_M2L_accept(multi_i->r_max_rebuild, multi_j->r_max_rebuild, */
       /*                        theta_crit2, r2_rebuild)) { */
-      if((abs(i-ii) <= 1 || abs(i-ii - cdim[0]) <= 1 || abs(i-ii + cdim[0]) <= 1) &&
-         (abs(j-jj) <= 1 || abs(j-jj - cdim[1]) <= 1 || abs(j-jj + cdim[1]) <= 1) &&
-         (abs(k-kk) <= 1 || abs(k-kk - cdim[2]) <= 1 || abs(k-kk + cdim[2]) <= 1)) {
-        
+      if ((abs(i - ii) <= 1 || abs(i - ii - cdim[0]) <= 1 ||
+           abs(i - ii + cdim[0]) <= 1) &&
+          (abs(j - jj) <= 1 || abs(j - jj - cdim[1]) <= 1 ||
+           abs(j - jj + cdim[1]) <= 1) &&
+          (abs(k - kk) <= 1 || abs(k - kk - cdim[2]) <= 1 ||
+           abs(k - kk + cdim[2]) <= 1)) {
 
 #if (ICHECK != 0)
-        if(check) {
-          ++direct_ngbs;
-          direct_ngbs_gpart += cj->multipole->m_pole.num_gpart;
-          message("Found direct neighbour %d: (i,j,k)=(%d,%d,%d) (ii,jj,kk)=(%d,%d,%d) nodeID=%d",
-                  direct_ngbs, i,j,k, ii,jj,kk, cj->nodeID);
+        if (check) {
+          ++direct_ngbs;
+          direct_ngbs_gpart += cj->multipole->m_pole.num_gpart;
+          message(
+              "Found direct neighbour %d: (i,j,k)=(%d,%d,%d) "
+              "(ii,jj,kk)=(%d,%d,%d) nodeID=%d",
+              direct_ngbs, i, j, k, ii, jj, kk, cj->nodeID);
         }
 #endif
-        
-      }else{
+      } else {
 
 #if (ICHECK != 0)
-        if(check) 
-          other_ngbs_gpart += cj->multipole->m_pole.num_gpart;
-#endif        
+        if (check) other_ngbs_gpart += cj->multipole->m_pole.num_gpart;
+#endif
 
         /* Let's compute the current distance between the cell pair*/
         double dx = CoM_i[0] - multi_j->CoM[0];
@@ -1292,17 +1294,19 @@ void runner_do_grav_long_range(struct runner *r, struct cell *ci, int timer) {
       }
     }  /* We are in charge of this pair */
   } /* Loop over top-level cells */
 
-  
 #ifdef SWIFT_DEBUG_CHECKS
   counter += ci->multipole->m_pole.num_gpart;
-  if(counter != e->total_nr_gparts)
+  if (counter != e->total_nr_gparts)
     error("Not found the right number of particles in top-level interactions");
 #endif
 
-  if(check)
-    message("Interacted with %d indirectly and ignored %d direct interactions (counter=%lld) nr_cells=%d total=%lld",
-            other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells, e->total_nr_gparts);
+  if (check)
+    message(
+        "Interacted with %d indirectly and ignored %d direct interactions "
+        "(counter=%lld) nr_cells=%d total=%lld",
+        other_ngbs_gpart, direct_ngbs_gpart, counter, nr_cells,
+        e->total_nr_gparts);
 
   if (timer) TIMER_TOC(timer_dograv_long_range);
 }
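The triple test above decides whether two top-level cells are direct neighbours on the periodic grid: per axis it accepts index differences of at most one, including wrap-around across the box edge. Distilled into one hedged helper (not a SWIFT API):

#include <stdlib.h> /* abs() */

/* Sketch: are grid indices a and b neighbours (or equal) along an axis of
 * cdim cells with periodic wrapping? Equivalent to the abs(...) tests above,
 * since 0 <= |a - b| <= cdim - 1. */
static int neighbour_1d_sketch(int a, int b, int cdim) {
  const int d = abs(a - b);
  return d <= 1 || d >= cdim - 1; /* direct, or wrapped across the edge */
}

/* Two cells are direct neighbours iff the test holds on all three axes. */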
diff --git a/src/scheduler.c b/src/scheduler.c
index 295f1bf0a31e0a1043901783b2f75fcdd8d6d415..6480309604a6bf93e5ddc31d865c87fbedfe7b80 100644
--- a/src/scheduler.c
+++ b/src/scheduler.c
@@ -1293,10 +1293,10 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
           err = MPI_Irecv(t->ci->sparts, t->ci->scount, spart_mpi_type,
                           t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
         } else if (t->subtype == task_subtype_multipole) {
-          t->buff = malloc(sizeof(struct gravity_tensors) * t->ci->pcell_size);          
-          err = MPI_Irecv(t->buff, sizeof(struct gravity_tensors) * t->ci->pcell_size,
-                          MPI_BYTE, t->ci->nodeID, t->flags, MPI_COMM_WORLD,
-                          &t->req);
+          t->buff = malloc(sizeof(struct gravity_tensors) * t->ci->pcell_size);
+          err = MPI_Irecv(
+              t->buff, sizeof(struct gravity_tensors) * t->ci->pcell_size,
+              MPI_BYTE, t->ci->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
         } else {
           error("Unknown communication sub-type");
         }
@@ -1330,13 +1330,13 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
         } else if (t->subtype == task_subtype_spart) {
           err = MPI_Isend(t->ci->sparts, t->ci->scount, spart_mpi_type,
                           t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
-        } else if (t->subtype == task_subtype_multipole) {          
+        } else if (t->subtype == task_subtype_multipole) {
           t->buff = malloc(sizeof(struct gravity_tensors) * t->ci->pcell_size);
           cell_pack_multipoles(t->ci, t->buff);
           err = MPI_Isend(
-              t->buff, t->ci->pcell_size * sizeof(struct gravity_tensors), MPI_BYTE,
-              t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);          
-        } else {
+              t->buff, t->ci->pcell_size * sizeof(struct gravity_tensors),
+              MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
+        } else {
           error("Unknown communication sub-type");
         }
         if (err != MPI_SUCCESS) {
diff --git a/src/space.c b/src/space.c
index 2fdeca1ac39d9902cde23e91ecbefce78312ce17..c4da734c0cda6762826816b1b40907718f0d9706 100644
--- a/src/space.c
+++ b/src/space.c
@@ -234,8 +234,7 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
     c->xparts = NULL;
     c->gparts = NULL;
     c->sparts = NULL;
-    if(s->gravity)
-      bzero(c->multipole, sizeof(struct gravity_tensors));
+    if (s->gravity) bzero(c->multipole, sizeof(struct gravity_tensors));
     for (int i = 0; i < 13; i++)
       if (c->sort[i] != NULL) {
         free(c->sort[i]);
@@ -246,14 +245,14 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
     c->recv_rho = NULL;
     c->recv_gradient = NULL;
     c->recv_grav = NULL;
-    //c->recv_multipole = NULL;
+    // c->recv_multipole = NULL;
     c->recv_ti = NULL;
 
     c->send_xv = NULL;
     c->send_rho = NULL;
     c->send_gradient = NULL;
     c->send_grav = NULL;
-    //c->send_multipole = NULL;
+    // c->send_multipole = NULL;
     c->send_ti = NULL;
 #endif
   }
@@ -266,7 +265,8 @@ void space_free_cells(struct space *s) {
   threadpool_map(&s->e->threadpool, space_rebuild_recycle_mapper, s->cells_top,
                  s->nr_cells, sizeof(struct cell), 0, s);
   s->maxdepth = 0;
-  message("Done"); fflush(stdout);
+  message("Done");
+  fflush(stdout);
 }
 
 /**
@@ -2243,12 +2243,12 @@ void space_split_recursive(struct space *s, struct cell *c,
       c->multipole->r_max = sqrt(dx * dx + dy * dy + dz * dz);
     } else {
       gravity_multipole_init(&c->multipole->m_pole);
-      if(c->nodeID == engine_rank) {
-        c->multipole->CoM[0] = c->loc[0] + c->width[0] / 2.;
-        c->multipole->CoM[1] = c->loc[1] + c->width[1] / 2.;
-        c->multipole->CoM[2] = c->loc[2] + c->width[2] / 2.;
-        c->multipole->r_max = 0.;
-      }
+      if (c->nodeID == engine_rank) {
+        c->multipole->CoM[0] = c->loc[0] + c->width[0] / 2.;
+        c->multipole->CoM[1] = c->loc[1] + c->width[1] / 2.;
+        c->multipole->CoM[2] = c->loc[2] + c->width[2] / 2.;
+        c->multipole->r_max = 0.;
+      }
     }
     c->multipole->r_max_rebuild = c->multipole->r_max;
     c->multipole->CoM_rebuild[0] = c->multipole->CoM[0];
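Taken together with the runner_main() and cell_(un)pack_multipoles() changes above, the multipole subtype follows a simple buffer lifecycle: the sender mallocs a scratch buffer, packs recursively and Isends it as raw bytes; the receiver mallocs a buffer of the same pcell_size, Irecvs into it, and the recv task later unpacks recursively and frees it. A compressed sketch of both ends (helper names are illustrative):

#include <mpi.h>
#include <stdlib.h>

/* Sender side: pack the cell hierarchy and ship it as raw bytes. */
static void send_multipoles_sketch(struct cell *c, int dest, int tag,
                                   MPI_Request *req) {
  struct gravity_tensors *buff =
      malloc(sizeof(struct gravity_tensors) * c->pcell_size);
  cell_pack_multipoles(c, buff); /* depth-first, pcell_size entries */
  MPI_Isend(buff, c->pcell_size * sizeof(struct gravity_tensors), MPI_BYTE,
            dest, tag, MPI_COMM_WORLD, req);
  /* buff must stay alive until the request completes, then be freed. */
}

/* Receiver side: the buffer is unpacked and freed by the recv task. */
static struct gravity_tensors *recv_multipoles_sketch(struct cell *c, int src,
                                                      int tag,
                                                      MPI_Request *req) {
  struct gravity_tensors *buff =
      malloc(sizeof(struct gravity_tensors) * c->pcell_size);
  MPI_Irecv(buff, c->pcell_size * sizeof(struct gravity_tensors), MPI_BYTE,
            src, tag, MPI_COMM_WORLD, req);
  return buff; /* later: cell_unpack_multipoles(c, buff); free(buff); */
}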