Commit 9ec48102 authored by Matthieu Schaller

Added communication functions to propagate the new number of star particles after a SF event has occurred.
parent 67661f0e
......@@ -508,8 +508,6 @@ int main(int argc, char *argv[]) {
#ifdef WITH_MPI
if (with_mpole_reconstruction && nr_nodes > 1)
error("Cannot reconstruct m-poles every step over MPI (yet).");
if (with_star_formation && with_feedback)
error("Can't run with star formation and feedback over MPI (yet)");
if (with_limiter) error("Can't run with time-step limiter over MPI (yet)");
#endif
......
This diff is collapsed.
......@@ -271,6 +271,23 @@ enum cell_flags {
cell_flag_do_bh_sub_drift = (1UL << 12)
};
/**
* @brief Cell information to propagate the new counts of star particles.
*/
struct pcell_sf {
/*! Stars variables */
struct {
/* Distance by which the stars pointer has moved since the last rebuild */
ptrdiff_t delta_from_rebuild;
/* Number of star particles in the cell */
int count;
} stars;
};
/**
* @brief Cell within the tree structure.
*
......@@ -521,6 +538,9 @@ struct cell {
/*! Pointer to the #spart data. */
struct spart *parts;
/*! Pointer to the #spart data at rebuild time. */
struct spart *parts_rebuild;
/*! The star ghost task itself */
struct task *ghost;
......@@ -802,6 +822,8 @@ int cell_unpack_end_step_black_holes(struct cell *c,
struct pcell_step_black_holes *pcell);
int cell_pack_multipoles(struct cell *c, struct gravity_tensors *m);
int cell_unpack_multipoles(struct cell *c, struct gravity_tensors *m);
int cell_pack_sf_counts(struct cell *c, struct pcell_sf *pcell);
int cell_unpack_sf_counts(struct cell *c, struct pcell_sf *pcell);
int cell_getsize(struct cell *c);
int cell_link_parts(struct cell *c, struct part *parts);
int cell_link_gparts(struct cell *c, struct gpart *gparts);
......
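The pack/unpack implementations themselves live in cell.c, whose diff is not shown here. A minimal sketch of the packing side, assuming it follows the same recursive flat-buffer layout as cell_pack_multipoles() (the returned entry count is also an assumption of this sketch):

/* Sketch only: the real function is in the cell.c diff, which is collapsed. */
int cell_pack_sf_counts(struct cell *c, struct pcell_sf *pcell) {

  /* Record how far the spart pointer has drifted since the last rebuild,
   * together with the updated particle count. */
  pcell->stars.delta_from_rebuild = c->stars.parts - c->stars.parts_rebuild;
  pcell->stars.count = c->stars.count;

  /* Pack the progeny into the consecutive slots of the flat buffer. */
  int count = 1;
  for (int k = 0; k < 8; k++)
    if (c->progeny[k] != NULL)
      count += cell_pack_sf_counts(c->progeny[k], &pcell[count]);

  return count;
}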
......@@ -2116,7 +2116,7 @@ void engine_allocate_foreign_particles(struct engine *e) {
}
/* For stars, we just use the numbers in the top-level cells */
count_sparts_in += e->proxies[k].cells_in[j]->stars.count;
count_sparts_in += e->proxies[k].cells_in[j]->stars.count + space_extra_sparts;
/* For black holes, we just use the numbers in the top-level cells */
count_bparts_in += e->proxies[k].cells_in[j]->black_holes.count;
......@@ -2207,7 +2207,7 @@ void engine_allocate_foreign_particles(struct engine *e) {
/* For stars, we just use the numbers in the top-level cells */
cell_link_sparts(e->proxies[k].cells_in[j], sparts);
sparts = &sparts[e->proxies[k].cells_in[j]->stars.count];
sparts = &sparts[e->proxies[k].cells_in[j]->stars.count + space_extra_sparts];
/* For black holes, we just use the numbers in the top-level cells */
cell_link_bparts(e->proxies[k].cells_in[j], bparts);
......@@ -3323,7 +3323,8 @@ void engine_skip_force_and_kick(struct engine *e) {
t->subtype == task_subtype_tend_gpart ||
t->subtype == task_subtype_tend_spart ||
t->subtype == task_subtype_tend_bpart ||
t->subtype == task_subtype_rho || t->subtype == task_subtype_gpart)
t->subtype == task_subtype_rho || t->subtype == task_subtype_gpart ||
t->subtype == task_subtype_sf_counts)
t->skip = 1;
}
......
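Adding space_extra_sparts to the foreign counts reserves head-room so that sparts created by star formation still fit in the foreign buffers without triggering a rebuild. An illustration with made-up numbers:

/* Hypothetical values: 100 extra slots per cell, 250 sparts at rebuild. */
const int space_extra_sparts = 100;
const int count_at_rebuild = 250; /* cells_in[j]->stars.count */
const int count_alloc = count_at_rebuild + space_extra_sparts; /* 350 slots */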
......@@ -236,7 +236,8 @@ void engine_addtasks_send_hydro(struct engine *e, struct cell *ci,
*/
void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
struct cell *cj, struct task *t_feedback,
struct task *t_ti) {
struct task *t_sf_counts, struct task *t_ti,
const int with_star_formation) {
#ifdef WITH_MPI
......@@ -262,6 +263,11 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
t_feedback = scheduler_addtask(s, task_type_send, task_subtype_spart,
ci->mpi.tag, 0, ci, cj);
if (with_star_formation && ci->hydro.count > 0) {
t_sf_counts = scheduler_addtask(
s, task_type_send, task_subtype_sf_counts, ci->mpi.tag, 0, ci, cj);
}
t_ti = scheduler_addtask(s, task_type_send, task_subtype_tend_spart,
ci->mpi.tag, 0, ci, cj);
......@@ -274,18 +280,24 @@ void engine_addtasks_send_stars(struct engine *e, struct cell *ci,
/* Drift before you send */
scheduler_addunlock(s, ci->hydro.super->stars.drift, t_feedback);
scheduler_addunlock(s, ci->super->timestep, t_ti);
}
/* Update the star counts before you send them */
if (with_star_formation && ci->hydro.count > 0) {
scheduler_addunlock(s, ci->top->hydro.star_formation, t_sf_counts);
}
engine_addlink(e, &ci->mpi.send, t_feedback);
engine_addlink(e, &ci->mpi.send, t_ti);
if (with_star_formation && ci->hydro.count > 0) {
engine_addlink(e, &ci->mpi.send, t_sf_counts);
}
}
/* Recurse? */
if (ci->split)
for (int k = 0; k < 8; k++)
if (ci->progeny[k] != NULL)
engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback, t_ti);
engine_addtasks_send_stars(e, ci->progeny[k], cj, t_feedback,
t_sf_counts, t_ti, with_star_formation);
#else
error("SWIFT was not compiled with MPI support.");
......@@ -462,7 +474,9 @@ void engine_addtasks_recv_hydro(struct engine *e, struct cell *c,
* @param t_ti The recv_ti_end #task, if it has already been created.
*/
void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
struct task *t_feedback, struct task *t_ti) {
struct task *t_feedback,
struct task *t_sf_counts, struct task *t_ti,
const int with_star_formation) {
#ifdef WITH_MPI
struct scheduler *s = &e->sched;
......@@ -481,11 +495,22 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
t_ti = scheduler_addtask(s, task_type_recv, task_subtype_tend_spart,
c->mpi.tag, 0, c, NULL);
if (with_star_formation && c->hydro.count > 0) {
t_sf_counts = scheduler_addtask(s, task_type_recv, task_subtype_sf_counts,
c->mpi.tag, 0, c, NULL);
/* Receive the stars only once the counts have been received */
scheduler_addunlock(s, t_sf_counts, t_feedback);
}
}
if (t_feedback != NULL) {
engine_addlink(e, &c->mpi.recv, t_feedback);
engine_addlink(e, &c->mpi.recv, t_ti);
if (with_star_formation && c->hydro.count > 0) {
engine_addlink(e, &c->mpi.recv, t_sf_counts);
}
#ifdef SWIFT_DEBUG_CHECKS
if (c->nodeID == e->nodeID) error("Local cell!");
......@@ -507,7 +532,8 @@ void engine_addtasks_recv_stars(struct engine *e, struct cell *c,
if (c->split)
for (int k = 0; k < 8; k++)
if (c->progeny[k] != NULL)
engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_ti);
engine_addtasks_recv_stars(e, c->progeny[k], t_feedback, t_sf_counts,
t_ti, with_star_formation);
#else
error("SWIFT was not compiled with MPI support.");
......@@ -614,6 +640,7 @@ void engine_addtasks_recv_gravity(struct engine *e, struct cell *c,
scheduler_addunlock(s, l->t, t_ti);
}
}
/* Recurse? */
if (c->split)
for (int k = 0; k < 8; k++)
......@@ -2419,6 +2446,9 @@ void engine_addtasks_send_mapper(void *map_data, int num_elements,
struct cell *cj = cell_type_pairs[k].cj;
const int type = cell_type_pairs[k].type;
/* Add the send task for the particle timesteps. */
// engine_addtasks_send_timestep(e, ci, cj, NULL, NULL, with_limiter);
/* Add the send tasks for the cells in the proxy that have a hydro
* connection. */
if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
......@@ -2431,13 +2461,6 @@ void engine_addtasks_send_mapper(void *map_data, int num_elements,
if ((e->policy & engine_policy_feedback) && (type & proxy_cell_type_hydro))
engine_addtasks_send_stars(e, ci, cj, /*t_feedback=*/NULL,
/*t_sf_counts=*/NULL, /*t_ti=*/NULL,
(e->policy & engine_policy_star_formation));
/* Add the send tasks for the cells in the proxy that have a black holes
* connection. */
if ((e->policy & engine_policy_black_holes) &&
(type & proxy_cell_type_hydro))
engine_addtasks_send_black_holes(e, ci, cj, /*t_feedback=*/NULL,
/*t_ti=*/NULL);
/* Add the send tasks for the cells in the proxy that have a gravity
* connection. */
if ((e->policy & engine_policy_self_gravity) &&
......@@ -2457,6 +2480,9 @@ void engine_addtasks_recv_mapper(void *map_data, int num_elements,
struct cell *ci = cell_type_pairs[k].ci;
const int type = cell_type_pairs[k].type;
/* Add the recv task for the particle timesteps. */
// engine_addtasks_recv_timestep(e, ci, NULL, NULL, with_limiter);
/* Add the recv tasks for the cells in the proxy that have a hydro
* connection. */
if ((e->policy & engine_policy_hydro) && (type & proxy_cell_type_hydro))
......@@ -2467,11 +2493,6 @@ void engine_addtasks_recv_mapper(void *map_data, int num_elements,
if ((e->policy & engine_policy_feedback) && (type & proxy_cell_type_hydro))
engine_addtasks_recv_stars(e, ci, /*t_feedback=*/NULL,
/*t_sf_counts=*/NULL, /*t_ti=*/NULL,
(e->policy & engine_policy_star_formation));
/* Add the recv tasks for the cells in the proxy that have a black holes
* connection. */
if ((e->policy & engine_policy_feedback) && (type & proxy_cell_type_hydro))
engine_addtasks_recv_black_holes(e, ci, NULL, NULL);
/* Add the recv tasks for the cells in the proxy that have a gravity
* connection. */
if ((e->policy & engine_policy_self_gravity) &&
......
......@@ -70,6 +70,10 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
struct engine *e = (struct engine *)((size_t *)extra_data)[0];
const int nodeID = e->nodeID;
const int with_limiter = e->policy & engine_policy_limiter;
#ifdef WITH_MPI
const int with_star_formation = e->policy & engine_policy_star_formation;
const int with_feedback = e->policy & engine_policy_feedback;
#endif
for (int ind = 0; ind < num_elements; ind++) {
......@@ -492,6 +496,17 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate_send(s, cj->mpi.send, task_subtype_tend_part,
ci_nodeID);
/* Propagating new star counts? */
if (with_star_formation && with_feedback) {
if (ci_active_hydro && ci->hydro.count > 0) {
scheduler_activate_recv(s, ci->mpi.recv, task_subtype_sf_counts);
}
if (cj_active_hydro && cj->hydro.count > 0) {
scheduler_activate_send(s, cj->mpi.send, task_subtype_sf_counts,
ci_nodeID);
}
}
} else if (cj_nodeID != nodeID) {
/* If the local cell is active, receive data from the foreign cell. */
......@@ -538,6 +553,17 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
if (ci_active_hydro)
scheduler_activate_send(s, ci->mpi.send, task_subtype_tend_part,
cj_nodeID);
/* Propagating new star counts? */
if (with_star_formation && with_feedback) {
if (cj_active_hydro && cj->hydro.count > 0) {
scheduler_activate_recv(s, cj->mpi.recv, task_subtype_sf_counts);
}
if (ci_active_hydro && ci->hydro.count > 0) {
scheduler_activate_send(s, ci->mpi.send, task_subtype_sf_counts,
cj_nodeID);
}
}
}
#endif
}
......
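Both hunks apply the same symmetric rule: post a receive for the foreign cell's counts when that cell is active, and a send of the local cell's counts to the foreign node when the local cell is active. A hypothetical helper capturing the pattern (not part of the diff):

static void activate_sf_counts(struct scheduler *s, struct cell *foreign,
                               const int foreign_active, struct cell *local,
                               const int local_active,
                               const int foreign_nodeID) {

  /* Receive the foreign cell's new star counts if it is active. */
  if (foreign_active && foreign->hydro.count > 0)
    scheduler_activate_recv(s, foreign->mpi.recv, task_subtype_sf_counts);

  /* Send the local cell's new star counts to the foreign node. */
  if (local_active && local->hydro.count > 0)
    scheduler_activate_send(s, local->mpi.send, task_subtype_sf_counts,
                            foreign_nodeID);
}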
......@@ -4162,14 +4162,11 @@ void *runner_main(void *data) {
case task_type_send:
if (t->subtype == task_subtype_tend_part) {
free(t->buff);
}
if (t->subtype == task_subtype_tend_gpart) {
} else if (t->subtype == task_subtype_tend_gpart) {
free(t->buff);
}
if (t->subtype == task_subtype_tend_spart) {
} else if (t->subtype == task_subtype_tend_spart) {
free(t->buff);
}
if (t->subtype == task_subtype_tend_bpart) {
} else if (t->subtype == task_subtype_sf_counts) {
free(t->buff);
}
break;
......@@ -4187,6 +4184,10 @@ void *runner_main(void *data) {
cell_unpack_end_step_black_holes(
ci, (struct pcell_step_black_holes *)t->buff);
free(t->buff);
} else if (t->subtype == task_subtype_sf_counts) {
cell_unpack_sf_counts(ci, (struct pcell_sf *)t->buff);
cell_clear_stars_sort_flags(ci);
free(t->buff);
} else if (t->subtype == task_subtype_xv) {
runner_do_recv_part(r, ci, 1, 1);
} else if (t->subtype == task_subtype_rho) {
......
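On the receive side the counts are applied before the spart data itself arrives (the recv/spart task is unlocked by recv/sf_counts, as set up in engine_maketasks.c above). A sketch of the unpacking counterpart, under the same assumptions as the packing sketch:

/* Sketch only: the real function is in the collapsed cell.c diff. */
int cell_unpack_sf_counts(struct cell *c, struct pcell_sf *pcell) {

  /* Mirror the sender's pointer move relative to the rebuild-time snapshot
   * and adopt the updated star count. */
  c->stars.parts = c->stars.parts_rebuild + pcell->stars.delta_from_rebuild;
  c->stars.count = pcell->stars.count;

  /* Recurse, consuming consecutive entries of the flat buffer. */
  int count = 1;
  for (int k = 0; k < 8; k++)
    if (c->progeny[k] != NULL)
      count += cell_unpack_sf_counts(c->progeny[k], &pcell[count]);

  return count;
}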
......@@ -1591,6 +1591,13 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
err = MPI_Irecv(t->buff, t->ci->mpi.pcell_size, multipole_mpi_type,
t->ci->nodeID, t->flags, subtaskMPI_comms[t->subtype],
&t->req);
} else if (t->subtype == task_subtype_sf_counts) {
t->buff = (struct pcell_sf *)malloc(sizeof(struct pcell_sf) *
t->ci->mpi.pcell_size);
err = MPI_Irecv(t->buff,
t->ci->mpi.pcell_size * sizeof(struct pcell_sf),
MPI_BYTE, t->ci->nodeID, t->flags,
subtaskMPI_comms[t->subtype], &t->req);
} else {
error("Unknown communication sub-type");
}
......@@ -1726,6 +1733,14 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
err = MPI_Isend(t->buff, t->ci->mpi.pcell_size, multipole_mpi_type,
t->cj->nodeID, t->flags, subtaskMPI_comms[t->subtype],
&t->req);
} else if (t->subtype == task_subtype_sf_counts) {
t->buff = (struct pcell_sf *)malloc(sizeof(struct pcell_sf) *
t->ci->mpi.pcell_size);
cell_pack_sf_counts(t->ci, (struct pcell_sf *)t->buff);
err = MPI_Isend(t->buff,
t->ci->mpi.pcell_size * sizeof(struct pcell_sf),
MPI_BYTE, t->cj->nodeID, t->flags,
subtaskMPI_comms[t->subtype], &t->req);
} else {
error("Unknown communication sub-type");
}
......
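Both sides size the message as pcell_size * sizeof(struct pcell_sf) and ship raw MPI_BYTEs, so correctness rests on the pack and unpack recursions visiting the same cell tree. If the unpack routine returns the number of consumed entries (an assumption of the sketches above), a hypothetical debug check in the recv branch of runner_main() could assert this:

#ifdef SWIFT_DEBUG_CHECKS
      /* Hypothetical check, not in the diff: the recursion must consume
       * exactly the pcell_size entries that were received. */
      const int consumed =
          cell_unpack_sf_counts(ci, (struct pcell_sf *)t->buff);
      if (consumed != ci->mpi.pcell_size)
        error("sf_counts mismatch: unpacked %d entries, expected %d",
              consumed, ci->mpi.pcell_size);
#endif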
......@@ -264,6 +264,7 @@ void space_rebuild_recycle_mapper(void *map_data, int num_elements,
c->hydro.xparts = NULL;
c->grav.parts = NULL;
c->stars.parts = NULL;
c->stars.parts_rebuild = NULL;
c->black_holes.parts = NULL;
c->flags = 0;
c->hydro.ti_end_min = -1;
......@@ -1870,6 +1871,9 @@ void space_rebuild(struct space *s, int repartitioned, int verbose) {
c->stars.parts = sfinger;
c->black_holes.parts = bfinger;
/* Store the state at rebuild time */
c->stars.parts_rebuild = c->stars.parts;
c->hydro.count_total = c->hydro.count + space_extra_parts;
c->grav.count_total = c->grav.count + space_extra_gparts;
c->stars.count_total = c->stars.count + space_extra_sparts;
......
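Storing parts_rebuild at rebuild time is what gives the delta_from_rebuild field of struct pcell_sf its meaning: both ranks share the rebuild-time layout, so a signed offset is enough to communicate a pointer move. A fragment with hypothetical numbers:

/* Hypothetical example: the cell's sparts started at slot 1000 of the global
 * buffer at rebuild; star formation then shifted the section down 3 slots. */
const ptrdiff_t delta = c->stars.parts - c->stars.parts_rebuild; /* == -3 */

/* Any rank holding the same rebuild-time snapshot reconstructs the new
 * pointer without knowing the sender's addresses: */
c->stars.parts = c->stars.parts_rebuild + delta;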
......@@ -99,7 +99,7 @@ const char *subtaskID_names[task_subtype_count] = {
"limiter", "grav", "external_grav", "tend_part",
"tend_gpart", "tend_spart", "tend_bpart", "xv",
"rho", "gpart", "multipole", "spart",
"stars_density", "stars_feedback", "bpart", "bh_density",
"stars_density", "stars_feedback", "sf_count", "bpart", "bh_density",
"bh_feedback"};
#ifdef WITH_MPI
......@@ -403,9 +403,8 @@ void task_unlock(struct task *t) {
if (subtype == task_subtype_grav) {
cell_gunlocktree(ci);
cell_munlocktree(ci);
} else if (subtype == task_subtype_stars_density) {
cell_sunlocktree(ci);
} else if (subtype == task_subtype_stars_feedback) {
} else if ((subtype == task_subtype_stars_density) ||
(subtype == task_subtype_stars_feedback)) {
cell_sunlocktree(ci);
cell_unlocktree(ci);
} else {
......@@ -420,10 +419,8 @@ void task_unlock(struct task *t) {
cell_gunlocktree(cj);
cell_munlocktree(ci);
cell_munlocktree(cj);
} else if (subtype == task_subtype_stars_density) {
cell_sunlocktree(ci);
cell_sunlocktree(cj);
} else if (subtype == task_subtype_stars_feedback) {
} else if ((subtype == task_subtype_stars_density) ||
(subtype == task_subtype_stars_feedback)) {
cell_sunlocktree(ci);
cell_sunlocktree(cj);
cell_unlocktree(ci);
......@@ -540,10 +537,8 @@ int task_lock(struct task *t) {
cell_gunlocktree(ci);
return 0;
}
} else if (subtype == task_subtype_stars_density) {
if (ci->stars.hold) return 0;
if (cell_slocktree(ci) != 0) return 0;
} else if (subtype == task_subtype_stars_feedback) {
} else if ((subtype == task_subtype_stars_density) ||
(subtype == task_subtype_stars_feedback)) {
if (ci->stars.hold) return 0;
if (ci->hydro.hold) return 0;
if (cell_slocktree(ci) != 0) return 0;
......@@ -576,14 +571,8 @@ int task_lock(struct task *t) {
cell_munlocktree(ci);
return 0;
}
} else if (subtype == task_subtype_stars_density) {
if (ci->stars.hold || cj->stars.hold) return 0;
if (cell_slocktree(ci) != 0) return 0;
if (cell_slocktree(cj) != 0) {
cell_sunlocktree(ci);
return 0;
}
} else if (subtype == task_subtype_stars_feedback) {
} else if ((subtype == task_subtype_stars_density) ||
(subtype == task_subtype_stars_feedback)) {
/* Lock the stars and the gas particles in both cells */
if (ci->stars.hold || cj->stars.hold) return 0;
if (ci->hydro.hold || cj->hydro.hold) return 0;
......
......@@ -112,6 +112,7 @@ enum task_subtypes {
task_subtype_spart,
task_subtype_stars_density,
task_subtype_stars_feedback,
task_subtype_sf_counts,
task_subtype_bpart,
task_subtype_bh_density,
task_subtype_bh_feedback,
......
......@@ -162,6 +162,9 @@ def taskIsStars(name):
"""
if "stars" in name or "spart" in name:
return True
if "sf_counts" in name:
return True
return False
def taskIsHydro(name):
......
......@@ -128,6 +128,7 @@ SUBTYPES = [
"spart",
"stars_density",
"stars_feedback",
"sf_counts"
"bh_density",
"bh_feedback",
"count",
......
......@@ -213,6 +213,7 @@ SUBTYPES = [
"spart",
"stars_density",
"stars_feedback",
"sf_counts",
"bh_density",
"bh_feedback",
"count",
......@@ -252,6 +253,8 @@ FULLTYPES = [
"send/gpart",
"recv/spart",
"send/spart",
"send/sf_counts",
"recv/sf_counts",
"self/stars_density",
"pair/stars_density",
"sub_self/stars_density",
......