Commit 99ea8c60 authored by Peter W. Draper's avatar Peter W. Draper
Browse files

Refactor MPI sends to clean up code

parent ebbe6de3
......@@ -1726,166 +1726,105 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
break;
case task_type_send:
#ifdef WITH_MPI
{
  /* Unified send path: every sub-type first resolves what to send
   * (buffer, element count, MPI datatype, total size in bytes), then a
   * single pair of MPI_Isend/MPI_Issend calls performs the dispatch.
   * Messages larger than s->mpi_message_limit use standard-mode
   * MPI_Isend; smaller ones use synchronous-mode MPI_Issend. */
  size_t size = 0;              /* Size in bytes. */
  size_t count = 0;             /* Number of elements to send. */
  MPI_Datatype type = MPI_BYTE; /* Type of the elements. */
  void *buff = NULL;            /* Buffer to send. */

  /* NOTE(review): the malloc() calls below are unchecked, matching the
   * surrounding code's convention — confirm an OOM policy exists upstream. */
  if (t->subtype == task_subtype_tend_part) {
    /* Time-step info for hydro particles, packed into a fresh buffer. */
    size = count = t->ci->mpi.pcell_size * sizeof(struct pcell_step_hydro);
    buff = t->buff = malloc(size);
    cell_pack_end_step_hydro(t->ci, (struct pcell_step_hydro *)buff);
  } else if (t->subtype == task_subtype_tend_gpart) {
    /* Time-step info for gravity particles. */
    size = count = t->ci->mpi.pcell_size * sizeof(struct pcell_step_grav);
    buff = t->buff = malloc(size);
    cell_pack_end_step_grav(t->ci, (struct pcell_step_grav *)buff);
  } else if (t->subtype == task_subtype_tend_spart) {
    /* Time-step info for star particles. */
    size = count = t->ci->mpi.pcell_size * sizeof(struct pcell_step_stars);
    buff = t->buff = malloc(size);
    cell_pack_end_step_stars(t->ci, (struct pcell_step_stars *)buff);
  } else if (t->subtype == task_subtype_tend_bpart) {
    /* Time-step info for black-hole particles. */
    size = count =
        t->ci->mpi.pcell_size * sizeof(struct pcell_step_black_holes);
    buff = t->buff = malloc(size);
    cell_pack_end_step_black_holes(t->ci,
                                   (struct pcell_step_black_holes *)buff);
  } else if (t->subtype == task_subtype_part_swallow) {
    /* Black-hole swallowing data, one entry per hydro particle. */
    size = count = t->ci->hydro.count * sizeof(struct black_holes_part_data);
    buff = t->buff = malloc(size);
    cell_pack_part_swallow(t->ci, (struct black_holes_part_data *)buff);
  } else if (t->subtype == task_subtype_xv || t->subtype == task_subtype_rho ||
             t->subtype == task_subtype_gradient) {
    /* Hydro particles are sent in place using the registered MPI type. */
    count = t->ci->hydro.count;
    size = count * sizeof(struct part);
    type = part_mpi_type;
    buff = t->ci->hydro.parts;
  } else if (t->subtype == task_subtype_gpart) {
    /* Gravity particles, sent in place. */
    count = t->ci->grav.count;
    size = count * sizeof(struct gpart);
    type = gpart_mpi_type;
    buff = t->ci->grav.parts;
  } else if (t->subtype == task_subtype_spart) {
    /* Star particles, sent in place. */
    count = t->ci->stars.count;
    size = count * sizeof(struct spart);
    type = spart_mpi_type;
    buff = t->ci->stars.parts;
  } else if (t->subtype == task_subtype_bpart_rho ||
             t->subtype == task_subtype_bpart_swallow ||
             t->subtype == task_subtype_bpart_feedback) {
    /* Black-hole particles, sent in place. */
    count = t->ci->black_holes.count;
    size = count * sizeof(struct bpart);
    type = bpart_mpi_type;
    buff = t->ci->black_holes.parts;
  } else if (t->subtype == task_subtype_multipole) {
    /* Gravity multipoles, packed into a fresh buffer. */
    count = t->ci->mpi.pcell_size;
    size = count * sizeof(struct gravity_tensors);
    type = multipole_mpi_type;
    buff = t->buff = malloc(size);
    cell_pack_multipoles(t->ci, (struct gravity_tensors *)buff);
  } else if (t->subtype == task_subtype_sf_counts) {
    /* Star-formation counts, packed into a fresh buffer. */
    size = count = t->ci->mpi.pcell_size * sizeof(struct pcell_sf);
    buff = t->buff = malloc(size);
    cell_pack_sf_counts(t->ci, (struct pcell_sf *)buff);
  } else {
    error("Unknown communication sub-type");
  }

  /* Large messages go standard-mode, small ones synchronous-mode. */
  if (size > s->mpi_message_limit) {
    err = MPI_Isend(buff, count, type, t->cj->nodeID, t->flags,
                    subtaskMPI_comms[t->subtype], &t->req);
  } else {
    err = MPI_Issend(buff, count, type, t->cj->nodeID, t->flags,
                     subtaskMPI_comms[t->subtype], &t->req);
  }

  if (err != MPI_SUCCESS) {
    mpi_error(err, "Failed to emit isend for particle data.");
  }

  /* Communication tasks always go to queue 0. */
  qid = 0;
}
#else
error("SWIFT was not compiled with MPI support.");
#endif
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment